Dataset schema:

    column                    type      values
    ------------------------  --------  ---------------------
    code                      string    lengths 86 to 54.5k
    code_codestyle            int64     0 to 371
    style_context             string    lengths 87 to 49.2k
    style_context_codestyle   int64     0 to 349
    label                     int64     0 to 1

=== sample 1 ===
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# Import structure consumed by _LazyModule below; submodules are only loaded
# when one of these names is first accessed.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
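# Sketch of the lazy-import effect above, assuming a recent `transformers`
# install: attribute access triggers the deferred submodule import.
#
#   import transformers
#   config = transformers.DPTConfig()  # configuration_dpt is imported here, on first use
#   print(config.model_type)           # "dpt"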
code_codestyle: 107
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig a : Optional[int] = logging.get_logger(__name__) # General docstring a : Union[str, Any] = '''MobileNetV1Config''' # Base docstring a : str = '''google/mobilenet_v1_1.0_224''' a : str = [1, 1024, 7, 7] # Image classification docstring a : Optional[Any] = '''google/mobilenet_v1_1.0_224''' a : Optional[int] = '''tabby, tabby cat''' a : List[str] = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : str , _lowercase : int=None ) ->int: '''simple docstring''' a : List[Any] = {} if isinstance(_lowercase , _lowercase ): a : Union[str, Any] = model.mobilenet_va else: a : List[str] = model a : Dict = "MobilenetV1/Conv2d_0/" a : Tuple = backbone.conv_stem.convolution.weight a : Dict = backbone.conv_stem.normalization.bias a : Optional[Any] = backbone.conv_stem.normalization.weight a : Optional[Any] = backbone.conv_stem.normalization.running_mean a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): a : List[str] = i + 1 a : Dict = i * 2 a : int = backbone.layer[pt_index] a : List[str] = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" a : int = pointer.convolution.weight a : Union[str, Any] = pointer.normalization.bias a : Union[str, Any] = pointer.normalization.weight a : Optional[Any] = pointer.normalization.running_mean a : Dict = pointer.normalization.running_var a : List[Any] = backbone.layer[pt_index + 1] a : Union[str, Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" a : Dict = pointer.convolution.weight a : Optional[Any] = pointer.normalization.bias a : Dict = pointer.normalization.weight a : Optional[Any] = pointer.normalization.running_mean a : Optional[Any] = pointer.normalization.running_var if isinstance(_lowercase , _lowercase ): a : Dict = "MobilenetV1/Logits/Conv2d_1c_1x1/" a : Tuple = model.classifier.weight a : Optional[int] = model.classifier.bias return tf_to_pt_map def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : List[Any] , _lowercase : Tuple ) ->int: '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model a : List[Any] = tf.train.list_variables(_lowercase ) a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) a : Union[str, Any] = tf.train.load_variable(_lowercase , _lowercase ) a : Optional[Any] = array # Build TF to PyTorch weights loading map a : Tuple = _build_tf_to_pytorch_map(_lowercase , _lowercase , _lowercase ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue a : List[str] = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) a : List[Any] = np.transpose(_lowercase , (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer a : Union[str, Any] = array.squeeze().transpose() else: a : Any = np.transpose(_lowercase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) a : str = torch.from_numpy(_lowercase ) tf_weights.pop(_lowercase , _lowercase ) tf_weights.pop(name + "/RMSProp" , _lowercase ) tf_weights.pop(name + "/RMSProp_1" , _lowercase ) tf_weights.pop(name + "/ExponentialMovingAverage" , _lowercase ) logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" ) return model def _SCREAMING_SNAKE_CASE ( _lowercase : torch.Tensor , _lowercase : nn.Convad ) ->torch.Tensor: '''simple docstring''' a, a : Any = features.shape[-2:] a, a : Dict = conv_layer.stride a, a : int = conv_layer.kernel_size if in_height % stride_height == 0: a : Tuple = max(kernel_height - stride_height , 0 ) else: a : Optional[Any] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: a : Optional[Any] = max(kernel_width - stride_width , 0 ) else: a : str = max(kernel_width - (in_width % stride_width) , 0 ) a : Any = pad_along_width // 2 a : List[str] = pad_along_width - pad_left a : List[str] = pad_along_height // 2 a : List[Any] = pad_along_height - pad_top a : int = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_lowercase , _lowercase , "constant" , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ) -> None: super().__init__() a : str = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) a : Optional[int] = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) a : Tuple = nn.Convad( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="zeros" , ) if use_normalization: a : Optional[int] = nn.BatchNormad( num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , ) else: a : int = None if use_activation: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): a : Optional[int] = 
ACTaFN[use_activation] elif isinstance(config.hidden_act , lowerCAmelCase__ ): a : Dict = ACTaFN[config.hidden_act] else: a : Union[str, Any] = config.hidden_act else: a : int = None def __a ( self , lowerCAmelCase__ ) -> torch.Tensor: if self.config.tf_padding: a : Union[str, Any] = apply_tf_padding(lowerCAmelCase__ , self.convolution ) a : List[str] = self.convolution(lowerCAmelCase__ ) if self.normalization is not None: a : int = self.normalization(lowerCAmelCase__ ) if self.activation is not None: a : Dict = self.activation(lowerCAmelCase__ ) return features class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] =MobileNetVaConfig lowerCamelCase : str =load_tf_weights_in_mobilenet_va lowerCamelCase : List[str] ="""mobilenet_v1""" lowerCamelCase : Tuple ="""pixel_values""" lowerCamelCase : Optional[Any] =False def __a ( self , lowerCAmelCase__ ) -> None: if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(lowerCAmelCase__ , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) a : Optional[Any] = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' a : List[str] = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , a__ , ) class __UpperCamelCase ( a__ ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True ) -> List[str]: super().__init__(lowerCAmelCase__ ) a : Tuple = config a : Dict = 32 a : Optional[int] = max(int(depth * config.depth_multiplier ) , config.min_depth ) a : Dict = MobileNetVaConvLayer( lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , ) a : Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] a : int = nn.ModuleList() for i in range(13 ): a : Optional[Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 a : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ) ) self.layer.append( MobileNetVaConvLayer( lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ) ) a : Tuple = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __a ( self , lowerCAmelCase__ ) -> Optional[Any]: raise NotImplementedError @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __a ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) a : List[str] = self.conv_stem(lowerCAmelCase__ ) a : Dict = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): a : List[Any] = layer_module(lowerCAmelCase__ ) if output_hidden_states: a : Optional[Any] = all_hidden_states + (hidden_states,) a : Any = hidden_states if self.pooler is not None: a : Union[str, Any] = torch.flatten(self.pooler(lowerCAmelCase__ ) , start_dim=1 ) else: a : List[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , ) @add_start_docstrings( """ MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , a__ , ) class __UpperCamelCase ( a__ ): def __init__( self , lowerCAmelCase__ ) -> None: super().__init__(lowerCAmelCase__ ) a : int = config.num_labels a : List[Any] = MobileNetVaModel(lowerCAmelCase__ ) a : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head a : Union[str, Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__ ) a : str = nn.Linear(lowerCAmelCase__ , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __a ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict a : Any = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) a : Optional[int] = outputs.pooler_output if return_dict else outputs[1] a : Tuple = self.classifier(self.dropout(lowerCAmelCase__ ) ) a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: a : List[Any] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): a : Any = "single_label_classification" else: a : int = "multi_label_classification" if self.config.problem_type == "regression": a : Tuple = MSELoss() if self.num_labels == 1: a : Dict = loss_fct(logits.squeeze() , labels.squeeze() ) else: a : str = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) elif self.config.problem_type == "single_label_classification": a : List[Any] = CrossEntropyLoss() a : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": a : int = BCEWithLogitsLoss() a : Optional[int] = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) if not return_dict: a : Optional[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
style_context_codestyle: 105
label: 0

=== sample 2 ===
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( a_ : Optional[int] , a_ : Any , a_ : Optional[Any] , a_ : Tuple ) -> str: # Initialise PyTorch model __SCREAMING_SNAKE_CASE :Union[str, Any] = FunnelConfig.from_json_file(a_ ) print(f'''Building PyTorch model from configuration: {config}''' ) __SCREAMING_SNAKE_CASE :str = FunnelBaseModel(a_ ) if base_model else FunnelModel(a_ ) # Load weights from tf checkpoint load_tf_weights_in_funnel(a_ , a_ , a_ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , a_ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not." ) lowerCamelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
code_codestyle: 239
"""simple docstring""" from __future__ import annotations import math def __lowerCamelCase ( a_ : int , a_ : int , a_ : bool , a_ : list[int] , a_ : float ) -> int: if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(a_ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , a_ , a_ , a_ ) , minimax(depth + 1 , node_index * 2 + 1 , a_ , a_ , a_ ) , ) return min( minimax(depth + 1 , node_index * 2 , a_ , a_ , a_ ) , minimax(depth + 1 , node_index * 2 + 1 , a_ , a_ , a_ ) , ) def __lowerCamelCase ( ) -> None: __SCREAMING_SNAKE_CASE :Dict = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] __SCREAMING_SNAKE_CASE :Optional[int] = math.log(len(a_ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , a_ , a_ , a_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
style_context_codestyle: 239
label: 1

=== sample 3 ===
from manim import *

# NOTE: the original class/variable names, direction constants, and colors were
# lost in obfuscation; the identifiers and the UP/DOWN/LEFT/RIGHT and
# RED/BLUE/ORANGE/YELLOW choices below are reconstructions.


class BigModelInferenceAnimation(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5),
                MoveToTarget(input, run_time=0.5),
                FadeIn(a_c, run_time=0.5),
                lag_ratio=0.2,
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a, run_time=0.5),
        )

        step_3 = MarkupText(
            "Inference on a model too large for GPU memory\nis successfully completed.", font_size=24
        )
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
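# Rendering sketch using the manim CLI; the file and scene names here are
# assumptions, since the originals were obfuscated (-pql previews at low
# quality):
#
#   manim -pql big_model_inference.py BigModelInferenceAnimation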
code_codestyle: 99
"""Convert UniSpeechSat checkpoints from fairseq."""

import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
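# A direct call mirroring the argparse entry point above (paths are
# placeholders, not real checkpoints):
#
#   convert_unispeech_sat_checkpoint(
#       checkpoint_path="/tmp/unispeech_sat/checkpoint.pt",
#       pytorch_dump_folder_path="/tmp/unispeech_sat_hf",
#       config_path=None,
#       dict_path=None,
#       is_finetuned=False,
#   )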
style_context_codestyle: 104
label: 0

=== sample 4 ===
"""Shard and convert a fairseq NLLB-MoE checkpoint to the Transformers format."""

import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
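# Shape of the index file written above (illustrative values only; the real
# shard count depends on how many per-expert files exist on disk):
#
# {
#   "metadata": {"total_size": 123456789},
#   "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00129-of-00129.bin", "...": "..."}
# }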
code_codestyle: 275
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be "
        "the image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
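# Usage sketch (vision extras and Pillow assumed installed; the image path is
# a placeholder). Tools are callable, which runs encode -> forward -> decode:
#
#   from PIL import Image
#
#   tool = ImageCaptioningTool()
#   print(tool(Image.open("cat.png")))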
style_context_codestyle: 275
label: 1

=== sample 5 ===
"""Recursive 0/1 knapsack: at each index, skip the item or (if it fits) take it."""


def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it still fits in the remaining capacity.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
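# Usage sketch: items (weight, value) = (1, 5), (2, 4), (4, 8), (5, 6) with
# capacity 5; taking the weight-1 and weight-4 items gives the optimum 5 + 8.
print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))  # -> 13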
code_codestyle: 298
"""End-to-end tests for the PyTorch no_trainer example scripts, launched via `accelerate launch`."""

import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
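# These tests shell out through `accelerate launch`; a plausible local
# invocation (the test-file path is an assumption):
#
#   python -m pytest -v examples/pytorch/test_accelerate_examples.py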
style_context_codestyle: 298
label: 1

=== sample 6 ===
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = AlbertTokenizer __UpperCamelCase = AlbertTokenizerFast __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True def UpperCAmelCase__ ( self :Tuple ) -> Dict: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] ) -> List[Any]: UpperCAmelCase = 'this is a test' UpperCAmelCase = 'this is a test' return input_text, output_text def UpperCAmelCase__ ( self :List[str] ) -> List[Any]: UpperCAmelCase = '<pad>' UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def UpperCAmelCase__ ( self :List[Any] ) -> Optional[Any]: UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '▁eloquent' ) self.assertEqual(len(lowercase_ ) , 3_00_00 ) def UpperCAmelCase__ ( self :int ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def UpperCAmelCase__ ( self :str ) -> str: if not self.test_rust_tokenizer: return UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = 'I was born in 92000, and this is falsé.' UpperCAmelCase = tokenizer.tokenize(lowercase_ ) UpperCAmelCase = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = tokenizer.encode(lowercase_ ) UpperCAmelCase = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]: UpperCAmelCase = AlbertTokenizer(lowercase_ , keep_accents=lowercase_ ) UpperCAmelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_ , ['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [48, 25, 21, 12_89] ) UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_ , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]: UpperCAmelCase = AlbertTokenizer(lowercase_ ) UpperCAmelCase = tokenizer.encode('sequence builders' ) UpperCAmelCase = tokenizer.encode('multi-sequence build' ) UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase_ ) UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def UpperCAmelCase__ ( self :int ) -> List[str]: UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
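# Illustration (standalone, not part of the test class above) of the layout the
# sequence-builder assertions check: ALBERT uses BERT-style special tokens, i.e.
# [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair. The
# helper below is a hypothetical stand-in for `build_inputs_with_special_tokens`;
# the ids (cls=2, sep=3) match the integration arrays above but are otherwise arbitrary.
def build_inputs_with_special_tokens(ids_a, ids_b=None, cls_id=2, sep_id=3):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


assert build_inputs_with_special_tokens([10, 11]) == [2, 10, 11, 3]
assert build_inputs_with_special_tokens([10, 11], [12]) == [2, 10, 11, 3, 12, 3]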
369
"""simple docstring""" import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging snake_case_ = logging.get_logger(__name__) def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = R'\w+[.]\d+' UpperCAmelCase = re.findall(lowercase_ , lowercase_ ) for pat in pats: UpperCAmelCase = key.replace(lowercase_ , '_'.join(pat.split('.' ) ) ) return key def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = pt_tuple_key[:-1] + ('scale',) if ( any('norm' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): UpperCAmelCase = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: UpperCAmelCase = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: UpperCAmelCase = pt_tuple_key[:-1] + ('embedding',) return renamed_pt_tuple_key, pt_tensor # conv layer UpperCAmelCase = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: UpperCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer UpperCAmelCase = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight": UpperCAmelCase = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight UpperCAmelCase = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias UpperCAmelCase = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=42 ): # Step 1: Convert pytorch tensor to numpy UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params UpperCAmelCase = flax_model.init_weights(PRNGKey(lowercase_ ) ) UpperCAmelCase = flatten_dict(lowercase_ ) UpperCAmelCase = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCAmelCase = rename_key(lowercase_ ) UpperCAmelCase = tuple(renamed_pt_key.split('.' ) ) # Correctly rename weight parameters UpperCAmelCase , UpperCAmelCase = rename_key_and_reshape_tensor(lowercase_ , lowercase_ , lowercase_ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # also add unexpected weight so that warning is thrown UpperCAmelCase = jnp.asarray(lowercase_ ) return unflatten_dict(lowercase_ )
181
0
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
64
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings (0.0 to 1.0).

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # consume the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
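# The numbers behind the "martha"/"marhta" doctest above: m = 6 matching
# characters, t = 1 transposition, and a common prefix of length 3.
m, t, len1, len2, prefix = 6, 1, 6, 6, 3
jaro = (m / len1 + m / len2 + (m - t) / m) / 3
assert round(jaro + 0.1 * prefix * (1 - jaro), 4) == 0.9611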
175
0
"""simple docstring""" import numpy as np __magic_name__ = [ ["a", "b", "c", "d", "e"], ["f", "g", "h", "i", "k"], ["l", "m", "n", "o", "p"], ["q", "r", "s", "t", "u"], ["v", "w", "x", "y", "z"], ] class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self): __SCREAMING_SNAKE_CASE = np.array(lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = np.where(letter == self.SQUARE) __SCREAMING_SNAKE_CASE = np.concatenate([indexa + 1, indexa + 1]) return indexes def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.SQUARE[indexa - 1, indexa - 1] return letter def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = message.lower() __SCREAMING_SNAKE_CASE = message.replace(""" """ , """""") __SCREAMING_SNAKE_CASE = message.replace("""j""" , """i""") __SCREAMING_SNAKE_CASE = np.empty((2, len(lowerCAmelCase__))) for letter_index in range(len(lowerCAmelCase__)): __SCREAMING_SNAKE_CASE = self.letter_to_numbers(message[letter_index]) __SCREAMING_SNAKE_CASE = numbers[0] __SCREAMING_SNAKE_CASE = numbers[1] __SCREAMING_SNAKE_CASE = first_step.reshape(2 * len(lowerCAmelCase__)) __SCREAMING_SNAKE_CASE = """""" for numbers_index in range(len(lowerCAmelCase__)): __SCREAMING_SNAKE_CASE = int(second_step[numbers_index * 2]) __SCREAMING_SNAKE_CASE = int(second_step[(numbers_index * 2) + 1]) __SCREAMING_SNAKE_CASE = self.numbers_to_letter(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = encoded_message + letter return encoded_message def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = message.lower() message.replace(""" """ , """""") __SCREAMING_SNAKE_CASE = np.empty(2 * len(lowerCAmelCase__)) for letter_index in range(len(lowerCAmelCase__)): __SCREAMING_SNAKE_CASE = self.letter_to_numbers(message[letter_index]) __SCREAMING_SNAKE_CASE = numbers[0] __SCREAMING_SNAKE_CASE = numbers[1] __SCREAMING_SNAKE_CASE = first_step.reshape((2, len(lowerCAmelCase__))) __SCREAMING_SNAKE_CASE = """""" for numbers_index in range(len(lowerCAmelCase__)): __SCREAMING_SNAKE_CASE = int(second_step[0, numbers_index]) __SCREAMING_SNAKE_CASE = int(second_step[1, numbers_index]) __SCREAMING_SNAKE_CASE = self.numbers_to_letter(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = decoded_message + letter return decoded_message
255
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __magic_name__ = False class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def snake_case_ ( self): return 1_2 @property def snake_case_ ( self): return 1_2 @property def snake_case_ ( self): return 3_2 @property def snake_case_ ( self): torch.manual_seed(0) __SCREAMING_SNAKE_CASE = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def snake_case_ ( self): __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") return tokenizer @property def snake_case_ ( self): torch.manual_seed(0) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(lowerCAmelCase__) @property def snake_case_ ( self): torch.manual_seed(0) __SCREAMING_SNAKE_CASE = 1_2 __SCREAMING_SNAKE_CASE = 1_2 __SCREAMING_SNAKE_CASE = { """attention_bias""": True, """cross_attention_dim""": 3_2, """attention_head_dim""": height * width, """num_attention_heads""": 1, """num_vector_embeds""": self.num_embed, """num_embeds_ada_norm""": self.num_embeds_ada_norm, """norm_num_groups""": 3_2, """sample_size""": width, """activation_fn""": """geglu-approximate""", } __SCREAMING_SNAKE_CASE = TransformeraDModel(**lowerCAmelCase__) return model def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """cpu""" __SCREAMING_SNAKE_CASE = self.dummy_vqvae __SCREAMING_SNAKE_CASE = self.dummy_text_encoder __SCREAMING_SNAKE_CASE = self.dummy_tokenizer __SCREAMING_SNAKE_CASE = self.dummy_transformer __SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed) __SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = VQDiffusionPipeline( vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = """teddy bear playing in the pool""" __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(0) __SCREAMING_SNAKE_CASE = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""") __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(0) __SCREAMING_SNAKE_CASE = pipe( [prompt] , generator=lowerCAmelCase__ , output_type="""np""" , return_dict=lowerCAmelCase__ , num_inference_steps=2)[0] 
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) __SCREAMING_SNAKE_CASE = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """cpu""" __SCREAMING_SNAKE_CASE = self.dummy_vqvae __SCREAMING_SNAKE_CASE = self.dummy_text_encoder __SCREAMING_SNAKE_CASE = self.dummy_tokenizer __SCREAMING_SNAKE_CASE = self.dummy_transformer __SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed) __SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings( learnable=lowerCAmelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length) __SCREAMING_SNAKE_CASE = VQDiffusionPipeline( vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = """teddy bear playing in the pool""" __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(0) __SCREAMING_SNAKE_CASE = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""") __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(0) __SCREAMING_SNAKE_CASE = pipe( [prompt] , generator=lowerCAmelCase__ , output_type="""np""" , return_dict=lowerCAmelCase__ , num_inference_steps=2)[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) __SCREAMING_SNAKE_CASE = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""") __SCREAMING_SNAKE_CASE = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""") __SCREAMING_SNAKE_CASE = pipeline.to(lowerCAmelCase__) pipeline.set_progress_bar_config(disable=lowerCAmelCase__) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(0) __SCREAMING_SNAKE_CASE = pipeline( """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=lowerCAmelCase__ , output_type="""np""" , ) __SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image).max() < 2.0
255
1
"""simple docstring""" import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' __UpperCAmelCase : Optional[datasets.Features] = None __UpperCAmelCase : str = "utf-8" __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True # deprecated __UpperCAmelCase : Optional[int] = None # deprecated __UpperCAmelCase : int = 1_0 << 2_0 # 10MB __UpperCAmelCase : Optional[bool] = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = JsonConfig def __UpperCAmelCase ( self ): if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) __a = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __UpperCAmelCase ( self , _a ): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_a , (str, list, tuple) ): __a = data_files if isinstance(_a , _a ): __a = [files] __a = [dl_manager.iter_files(_a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __a = [] for split_name, files in data_files.items(): if isinstance(_a , _a ): __a = [files] __a = [dl_manager.iter_files(_a ) for file in files] splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) ) return splits def __UpperCAmelCase ( self , _a ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): __a = self.config.features.arrow_schema.field(_a ).type __a = pa_table.append_column(_a , pa.array([None] * len(_a ) , type=_a ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example __a = table_cast(_a , self.config.features.arrow_schema ) return pa_table def __UpperCAmelCase ( self , _a ): for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: __a = json.load(_a ) # We keep only the field we are interested in __a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_a , (list, tuple) ): __a = set().union(*[row.keys() for row in dataset] ) __a = {col: [row.get(_a ) for row in dataset] for col in keys} else: __a = dataset __a = pa.Table.from_pydict(_a ) yield file_idx, self._cast_table(_a ) # If the file has one json object per line else: with open(_a , '''rb''' ) as f: __a = 0 # Use block_size equal to the chunk 
size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small __a = max(self.config.chunksize // 32 , 16 << 10 ) __a = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: __a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_a ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": __a = batch.decode(self.config.encoding , errors=_a ).encode('''utf-8''' ) try: while True: try: __a = paj.read_json( io.BytesIO(_a ) , read_options=paj.ReadOptions(block_size=_a ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_a , pa.ArrowInvalid ) and "straddling" not in str(_a ) or block_size > len(_a ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'''Batch of {len(_a )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: __a = json.load(_a ) except json.JSONDecodeError: logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_a , _a ): # list is the only sequence type supported in JSON try: __a = set().union(*[row.keys() for row in dataset] ) __a = {col: [row.get(_a ) for row in dataset] for col in keys} __a = pa.Table.from_pydict(_a ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None yield file_idx, self._cast_table(_a ) break else: logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' ) raise ValueError( f'''Not able to read records in the JSON file at {file}. ''' f'''You should probably indicate the field of the JSON file containing your records. ''' f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ''' f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_a ) batch_idx += 1
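# Standalone sketch of the retry strategy used above (simplified: no encoding
# handling or chunked file reads): double the pyarrow block size until no JSON
# record straddles a block boundary, and re-raise once growing the block can
# no longer help.
import io

import pyarrow as pa
import pyarrow.json as paj


def read_json_lines_with_retry(raw: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(raw), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(raw):
                raise  # the payload itself is invalid, not the block size
            block_size *= 2  # a record straddled two blocks; retry with larger blocks


assert read_json_lines_with_retry(b'{"a": 1}\n{"a": 2}\n').num_rows == 2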
45
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
0
'''simple docstring'''
import pprint

import requests

API_ENDPOINT_URL = 'https://zenquotes.io/api'


def quote_of_the_day() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/today').json()


def random_quotes() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/random').json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
363
'''simple docstring''' import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger a : Any = get_logger(__name__) a : Union[str, Any] = Path(__file__).parent / 'model_card_template.md' a : List[Any] = uuida().hex a : List[str] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES a : str = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES a : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __magic_name__ ( __UpperCAmelCase = None ) -> str: '''simple docstring''' snake_case_ = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F"; torch/{_torch_version}" if is_flax_available(): ua += F"; jax/{_jax_version}" ua += F"; flax/{_flax_version}" if is_onnx_available(): ua += F"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get('''DIFFUSERS_IS_CI''', '''''' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCAmelCase, __UpperCAmelCase ): ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() ) elif isinstance(__UpperCAmelCase, __UpperCAmelCase ): ua += "; " + user_agent return ua def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None, __UpperCAmelCase = None ) -> Optional[Any]: '''simple docstring''' if token is None: snake_case_ = HfFolder.get_token() if organization is None: snake_case_ = whoami(__UpperCAmelCase )['''name'''] return F"{username}/{model_id}" else: return F"{organization}/{model_id}" def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( '''Modelcard rendering is based on Jinja templates.''' ''' Please make sure to have `jinja` installed before using `create_model_card`.''' ''' To install it, please run `pip install Jinja2`.''' ) if hasattr(__UpperCAmelCase, '''local_rank''' ) and args.local_rank not in [-1, 0]: return snake_case_ = args.hub_token if hasattr(__UpperCAmelCase, '''hub_token''' ) else None snake_case_ = get_full_repo_name(__UpperCAmelCase, token=__UpperCAmelCase ) snake_case_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=__UpperCAmelCase, model_name=__UpperCAmelCase, repo_name=__UpperCAmelCase, dataset_name=args.dataset_name if hasattr(__UpperCAmelCase, '''dataset_name''' ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, 
gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCAmelCase, '''gradient_accumulation_steps''' ) else None ), adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta1''' ) else None, adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta2''' ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCAmelCase, '''adam_weight_decay''' ) else None, adam_epsilon=args.adam_epsilon if hasattr(__UpperCAmelCase, '''adam_epsilon''' ) else None, lr_scheduler=args.lr_scheduler if hasattr(__UpperCAmelCase, '''lr_scheduler''' ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCAmelCase, '''lr_warmup_steps''' ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCAmelCase, '''ema_inv_gamma''' ) else None, ema_power=args.ema_power if hasattr(__UpperCAmelCase, '''ema_power''' ) else None, ema_max_decay=args.ema_max_decay if hasattr(__UpperCAmelCase, '''ema_max_decay''' ) else None, mixed_precision=args.mixed_precision, ) snake_case_ = os.path.join(args.output_dir, '''README.md''' ) model_card.save(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> Optional[Any]: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash snake_case_ = str(Path(__UpperCAmelCase ).as_posix() ) snake_case_ = re.search(r'''snapshots/([^/]+)/''', __UpperCAmelCase ) if search is None: return None snake_case_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCAmelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. a : str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) a : Optional[Any] = os.path.join(hf_cache_home, 'diffusers') def __magic_name__ ( __UpperCAmelCase = None, __UpperCAmelCase = None ) -> None: '''simple docstring''' if new_cache_dir is None: snake_case_ = DIFFUSERS_CACHE if old_cache_dir is None: snake_case_ = old_diffusers_cache snake_case_ = Path(__UpperCAmelCase ).expanduser() snake_case_ = Path(__UpperCAmelCase ).expanduser() for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): snake_case_ = new_cache_dir / old_blob_path.relative_to(__UpperCAmelCase ) new_blob_path.parent.mkdir(parents=__UpperCAmelCase, exist_ok=__UpperCAmelCase ) os.replace(__UpperCAmelCase, __UpperCAmelCase ) try: os.symlink(__UpperCAmelCase, __UpperCAmelCase ) except OSError: logger.warning( '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). a : Tuple = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): a : Tuple = 0 else: with open(cache_version_file) as f: try: a : Optional[Any] = int(f.read()) except ValueError: a : List[str] = 0 if cache_version < 1: a : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. 
This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: a : str = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' 'the directory exists and can be written to.' ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> str: '''simple docstring''' if variant is not None: snake_case_ = weights_name.split('''.''' ) snake_case_ = splits[:-1] + [variant] + splits[-1:] snake_case_ = '''.'''.join(__UpperCAmelCase ) return weights_name def __magic_name__ ( __UpperCAmelCase, *, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=None, ) -> int: '''simple docstring''' snake_case_ = str(__UpperCAmelCase ) if os.path.isfile(__UpperCAmelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCAmelCase ): if os.path.isfile(os.path.join(__UpperCAmelCase, __UpperCAmelCase ) ): # Load from a PyTorch checkpoint snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ): snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) return model_file else: raise EnvironmentError( F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse('''0.20.0''' ) ): try: snake_case_ = hf_hub_download( __UpperCAmelCase, filename=_add_variant(__UpperCAmelCase, __UpperCAmelCase ), cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, ) warnings.warn( F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", __UpperCAmelCase, ) return model_file except: # noqa: E722 warnings.warn( F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCAmelCase, __UpperCAmelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. 
\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__UpperCAmelCase, __UpperCAmelCase )}' so that the correct variant file can be added.", __UpperCAmelCase, ) try: # 2. Load model file as usual snake_case_ = hf_hub_download( __UpperCAmelCase, filename=__UpperCAmelCase, cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ''' '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ''' '''login`.''' ) except RevisionNotFoundError: raise EnvironmentError( F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " '''this model name. Check the model page at ''' F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" F" directory containing a file named {weights_name} or" ''' \nCheckout your internet connection or see how to run the library in''' ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' ) except EnvironmentError: raise EnvironmentError( F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ''' F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " F"containing a file named {weights_name}" )
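# The variant-naming rule from the hub utilities above, re-implemented as a
# standalone helper (hypothetical name): the variant tag is spliced in just
# before the file extension.
from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        parts = weights_name.split(".")
        weights_name = ".".join(parts[:-1] + [variant] + parts[-1:])
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"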
72
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[Any] = ShapEImgaImgPipeline __UpperCAmelCase : Any = ['''image'''] __UpperCAmelCase : Optional[Any] = ['''image'''] __UpperCAmelCase : List[str] = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] __UpperCAmelCase : List[str] = False @property def __lowercase ( self : Optional[Any] ): '''simple docstring''' return 32 @property def __lowercase ( self : Dict ): '''simple docstring''' return 32 @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowercase ( self : Tuple ): '''simple docstring''' return 8 @property def __lowercase ( self : int ): '''simple docstring''' torch.manual_seed(0 ) _a : Optional[int] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,) _a : int = CLIPVisionModel(_a ) return model @property def __lowercase ( self : Optional[int] ): '''simple docstring''' _a : Dict = CLIPImageProcessor( crop_size=224 ,do_center_crop=_a ,do_normalize=_a ,do_resize=_a ,image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] ,image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,resample=3 ,size=224 ,) return image_processor @property def __lowercase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) _a : str = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } _a : Tuple = PriorTransformer(**_a ) return model @property def __lowercase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) _a : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } _a : List[str] = ShapERenderer(**_a ) return model def __lowercase ( self : List[str] ): '''simple docstring''' _a : int = self.dummy_prior _a : Optional[int] = self.dummy_image_encoder _a : Dict = self.dummy_image_processor _a : List[Any] = self.dummy_renderer _a : Union[str, Any] = HeunDiscreteScheduler( beta_schedule='exp' ,num_train_timesteps=1024 ,prediction_type='sample' ,use_karras_sigmas=_a ,clip_sample=_a ,clip_sample_range=1.0 ,) _a : Optional[int] = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': 
renderer, 'scheduler': scheduler, } return components def __lowercase ( self : Optional[int] ,_a : Tuple ,_a : str=0 ): '''simple docstring''' _a : int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith('mps' ): _a : Optional[Any] = torch.manual_seed(_a ) else: _a : str = torch.Generator(device=_a ).manual_seed(_a ) _a : str = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowercase ( self : Tuple ): '''simple docstring''' _a : str = 'cpu' _a : List[str] = self.get_dummy_components() _a : int = self.pipeline_class(**_a ) _a : int = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _a : List[str] = pipe(**self.get_dummy_inputs(_a ) ) _a : List[Any] = output.images[0] _a : Any = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) _a : Any = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowercase ( self : Union[str, Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowercase ( self : Tuple ): '''simple docstring''' _a : int = torch_device == 'cpu' _a : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=_a ,relax_max_difference=_a ,) def __lowercase ( self : Any ): '''simple docstring''' _a : List[str] = self.get_dummy_components() _a : List[str] = self.pipeline_class(**_a ) _a : Any = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _a : str = 1 _a : Dict = 2 _a : List[str] = self.get_dummy_inputs(_a ) for key in inputs.keys(): if key in self.batch_params: _a : Optional[int] = batch_size * [inputs[key]] _a : Union[str, Any] = pipe(**_a ,num_images_per_prompt=_a )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self : Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : Tuple ): '''simple docstring''' _a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) _a : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) _a : Optional[int] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) _a : List[Any] = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _a : List[str] = torch.Generator(device=_a ).manual_seed(0 ) _a : int = pipe( _a ,generator=_a ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_a ,_a )
271
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through the given points at x0,
    using Neville's iterated-interpolation scheme.
    Returns [value_at_x0, full_table].
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
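# Worked example: (1, 6), (2, 11), (3, 18) and (4, 27) all lie on
# y = x^2 + 2x + 3, so interpolating at x0 = 5 must give 5^2 + 2*5 + 3 = 38.
value, table = neville_interpolate([1, 2, 3, 4], [6, 11, 18, 27], 5)
assert value == 38.0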
271
1
"""simple docstring""" import heapq import sys import numpy as np SCREAMING_SNAKE_CASE : List[str] = tuple[int, int] class __lowerCamelCase : def __init__(self ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = set() def A__ (self ): '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def A__ (self ): '''simple docstring''' return len(self.elements ) == 0 def A__ (self , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(UpperCamelCase__ ) else: # update # print("update", item) _lowerCAmelCase = [] ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def A__ (self , lowerCamelCase ): '''simple docstring''' if item in self.set: self.set.remove(UpperCamelCase__ ) _lowerCAmelCase = [] ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def A__ (self ): '''simple docstring''' return self.elements[0][1] def A__ (self ): '''simple docstring''' ((_lowerCAmelCase) , (_lowerCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(UpperCamelCase__ ) return (priority, item) def __UpperCAmelCase ( snake_case_ : TPos , snake_case_ : TPos ) -> List[str]: """simple docstring""" _lowerCAmelCase = np.array(__UpperCamelCase ) _lowerCAmelCase = np.array(__UpperCamelCase ) return np.linalg.norm(a - b ) def __UpperCAmelCase ( snake_case_ : TPos , snake_case_ : TPos ) -> str: """simple docstring""" return consistent_heuristic(__UpperCamelCase , __UpperCamelCase ) // t def __UpperCAmelCase ( snake_case_ : TPos , snake_case_ : TPos ) -> Dict: """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def __UpperCAmelCase ( snake_case_ : TPos , snake_case_ : int , snake_case_ : TPos , snake_case_ : dict[TPos, float] ) -> Optional[Any]: """simple docstring""" _lowerCAmelCase = g_function[start] + Wa * heuristics[i](__UpperCamelCase , __UpperCamelCase ) return ans def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : int ) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase = np.chararray((n, n) ) for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): _lowerCAmelCase = """*""" for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): if (j, (n - 1) - i) in blocks: _lowerCAmelCase = """#""" _lowerCAmelCase = """-""" _lowerCAmelCase = back_pointer[goal] while x != start: ((_lowerCAmelCase) , (_lowerCAmelCase)) = x # print(x) _lowerCAmelCase = """-""" _lowerCAmelCase = back_pointer[x] _lowerCAmelCase = """-""" for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=""" """ ) print("""<-- End position""" , end=""" """ ) else: print(grid[i][j] , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) _lowerCAmelCase = back_pointer[goal] while x != start: print(__UpperCamelCase , end=""" """ ) _lowerCAmelCase = back_pointer[x] print(__UpperCamelCase ) sys.exit() 
def __UpperCAmelCase ( snake_case_ : TPos ) -> int: """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : str , ) -> Tuple: """simple docstring""" for itera in range(__UpperCamelCase ): open_list[itera].remove_element(__UpperCamelCase ) # print("s", s) # print("j", j) ((_lowerCAmelCase) , (_lowerCAmelCase)) = s _lowerCAmelCase = (x - 1, y) _lowerCAmelCase = (x + 1, y) _lowerCAmelCase = (x, y + 1) _lowerCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__UpperCamelCase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__UpperCamelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = float("""inf""" ) if valid(__UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1: _lowerCAmelCase = g_function[s] + 1 _lowerCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(__UpperCamelCase , key(__UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase ) ) if neighbours not in close_list_inad: for var in range(1 , __UpperCamelCase ): if key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) <= Wa * key( __UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase ): open_list[j].put( __UpperCamelCase , key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) def __UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" _lowerCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list SCREAMING_SNAKE_CASE : List[str] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} SCREAMING_SNAKE_CASE : List[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (1_0, 1), (1_1, 1), (1_2, 1), (1_3, 1), (1_4, 1), (1_5, 1), (1_6, 1), (1_7, 1), (1_8, 1), (1_9, 1), ] SCREAMING_SNAKE_CASE : List[str] = make_common_ground() SCREAMING_SNAKE_CASE : Any = blocks_blk # hyper parameters SCREAMING_SNAKE_CASE : Tuple = 1 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : List[str] = 2_0 SCREAMING_SNAKE_CASE : Optional[int] = 3 # one consistent and two other inconsistent # start and end destination SCREAMING_SNAKE_CASE : Optional[Any] = (0, 0) SCREAMING_SNAKE_CASE : Dict = (n - 1, n - 1) SCREAMING_SNAKE_CASE : Dict = 1 def __UpperCAmelCase ( snake_case_ : TPos , snake_case_ : TPos , snake_case_ : int ) -> str: """simple docstring""" _lowerCAmelCase = {start: 0, goal: float("""inf""" )} _lowerCAmelCase = {start: -1, goal: -1} _lowerCAmelCase = [] _lowerCAmelCase = set() for i in range(__UpperCamelCase ): open_list.append(PriorityQueue() ) open_list[i].put(__UpperCamelCase , key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) _lowerCAmelCase = [] _lowerCAmelCase = [] while open_list[0].minkey() < float("""inf""" ): for i in range(1 , __UpperCamelCase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= 
open_list[i].minkey(): if g_function[goal] < float("""inf""" ): do_something(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: _lowerCAmelCase , _lowerCAmelCase = open_list[i].top_show() visited.add(__UpperCamelCase ) expand_state( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) close_list_inad.append(__UpperCamelCase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("""inf""" ): do_something(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: _lowerCAmelCase = open_list[0].top_show() visited.add(__UpperCamelCase ) expand_state( __UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) close_list_anchor.append(__UpperCamelCase ) print("""No path found to goal""" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__UpperCamelCase ): if (j, i) in blocks: print("""#""" , end=""" """ ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("""*""" , end=""" """ ) else: print("""-""" , end=""" """ ) else: print("""*""" , end=""" """ ) if (j, i) == (n - 1, n - 1): print("""<-- End position""" , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
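# The queue priority used above, in isolation: key(s, i) = g(s) + W1 * h_i(s),
# and an inadmissible search i is only expanded while
# open_list[i].minkey() <= W2 * open_list[0].minkey(). Toy numbers:
def priority_key(g: float, h: float, w1: float = 1.0) -> float:
    return g + w1 * h


assert priority_key(4.0, 3.0) == 7.0           # f = g + W1 * h with W1 = 1
assert priority_key(4.0, 3.0, w1=2.0) == 10.0  # heavier weight on the heuristic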
369
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device SCREAMING_SNAKE_CASE : List[str] = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def A__ (self ): '''simple docstring''' _lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _lowerCAmelCase = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
317
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCamelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
"""simple docstring""" def A ( snake_case :int ) -> int: __UpperCamelCase = [1] __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0 __UpperCamelCase = ugly_nums[ia] * 2 __UpperCamelCase = ugly_nums[ia] * 3 __UpperCamelCase = ugly_nums[ia] * 5 for _ in range(1 , snake_case ): __UpperCamelCase = min(snake_case , snake_case , snake_case ) ugly_nums.append(snake_case ) if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'''{ugly_numbers(2_0_0) = }''')
316
1
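An aside on the ugly-numbers snippet above: the merged-pointer construction (ugly numbers are those whose only prime factors are 2, 3 and 5) is easy to sanity-check. Below is a hedged restatement with readable names; `nth_ugly` is a hypothetical helper written for illustration, not part of the file above, whose `__main__` block still calls `ugly_numbers` even though the style transform bound the function itself to `A`.

def nth_ugly(n: int) -> int:
    ugly = [1]
    i2 = i3 = i5 = 0
    while len(ugly) < n:
        # each pointer tracks the smallest ugly number not yet multiplied by
        # its prime, so candidates come out deduplicated and in sorted order
        nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
        ugly.append(nxt)
        if nxt == ugly[i2] * 2:
            i2 += 1
        if nxt == ugly[i3] * 3:
            i3 += 1
        if nxt == ugly[i5] * 5:
            i5 += 1
    return ugly[-1]

assert [nth_ugly(k) for k in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]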
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


lowercase__ :str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :str = ["ViTFeatureExtractor"]
    lowercase__ :int = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :int = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :Any = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :Optional[int] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    lowercase__ :Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
97
lowercase__ :Any = 8.3_144_598


def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    '''simple docstring'''
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''' )
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    lowercase__ :Optional[Any] = 300
    lowercase__ :List[Any] = 28
    lowercase__ :Dict = rms_speed_of_molecule(temperature, molar_mass)
    print(F'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
97
1
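A quick numeric check of the formula in the snippet above, v_rms = sqrt(3RT/M). The snippet's own error message says molar mass is expected in kg/mol, so its example value of 28 only yields the familiar ~517 m/s for nitrogen if it is read as 0.028 kg/mol:

R = 8.3144598  # J/(mol*K), the same constant the snippet defines

v_rms = (3 * R * 300 / 0.028) ** 0.5  # N2 at 300 K, M = 0.028 kg/mol
print(f"{v_rms:.0f} m/s")  # ~517 m/s; passing 28 instead gives ~16.3 m/s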
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> int: __UpperCamelCase =parent __UpperCamelCase =batch_size __UpperCamelCase =seq_length __UpperCamelCase =is_training __UpperCamelCase =use_input_mask __UpperCamelCase =use_token_type_ids __UpperCamelCase =use_labels __UpperCamelCase =vocab_size __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_act __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =max_position_embeddings __UpperCamelCase =type_vocab_size __UpperCamelCase =type_sequence_label_size __UpperCamelCase =initializer_range __UpperCamelCase =num_labels __UpperCamelCase =num_choices __UpperCamelCase =scope def _a ( self ) -> Dict: __UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase =None if self.use_input_mask: __UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase =None if self.use_token_type_ids: __UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase =None __UpperCamelCase =None __UpperCamelCase =None if self.use_labels: __UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self ) -> int: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , use_stable_embedding=A_ , ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict: __UpperCamelCase =OpenLlamaModel(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ ) __UpperCamelCase =model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]: __UpperCamelCase =True __UpperCamelCase =OpenLlamaModel(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , 
encoder_attention_mask=A_ , ) __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) __UpperCamelCase =model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Union[str, Any]: __UpperCamelCase =OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> List[Any]: __UpperCamelCase =True __UpperCamelCase =True __UpperCamelCase =OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) __UpperCamelCase =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCamelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase =torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0] __UpperCamelCase =model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0] # select random slice __UpperCamelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCamelCase =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) ) def _a ( self ) -> List[str]: __UpperCamelCase =self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) =config_and_inputs __UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase__ ( A_ , A_ , A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) UpperCAmelCase__ : List[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else () UpperCAmelCase__ : str = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[int] = False def _a ( self ) -> List[str]: __UpperCamelCase =OpenLlamaModelTester(self ) __UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 ) def _a ( self ) -> Tuple: self.config_tester.run_common_tests() def _a ( self ) -> Any: __UpperCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _a ( self ) -> str: __UpperCamelCase 
=self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCamelCase =type self.model_tester.create_and_check_model(*A_ ) def _a ( self ) -> Dict: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =3 __UpperCamelCase =input_dict['input_ids'] __UpperCamelCase =input_ids.ne(1 ).to(A_ ) __UpperCamelCase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase =OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self ) -> Tuple: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =3 __UpperCamelCase ='single_label_classification' __UpperCamelCase =input_dict['input_ids'] __UpperCamelCase =input_ids.ne(1 ).to(A_ ) __UpperCamelCase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase =OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self ) -> Tuple: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =3 __UpperCamelCase ='multi_label_classification' __UpperCamelCase =input_dict['input_ids'] __UpperCamelCase =input_ids.ne(1 ).to(A_ ) __UpperCamelCase =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCamelCase =OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def _a ( self ) -> List[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase =ids_tensor([1, 10] , config.vocab_size ) __UpperCamelCase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCamelCase =OpenLlamaModel(A_ ) original_model.to(A_ ) original_model.eval() __UpperCamelCase =original_model(A_ ).last_hidden_state __UpperCamelCase =original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCamelCase ={'type': scaling_type, 'factor': 10.0} __UpperCamelCase =OpenLlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() __UpperCamelCase =scaled_model(A_ ).last_hidden_state __UpperCamelCase =scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ , A_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) )
62
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

lowerCamelCase__ = logging.get_logger(__name__)


lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]


@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
        '''simple docstring'''
        super().__init__(*__lowercase , **__lowercase )

        if self.framework == "tf":
            raise ValueError(F"The {self.__class__} is only available in PyTorch." )

        requires_backends(self , """vision""" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )

    def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ):
        '''simple docstring'''
        __a = {}
        if "threshold" in kwargs:
            __a = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs

    def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ):
        '''simple docstring'''
        return super().__call__(*__lowercase , **__lowercase )

    def UpperCamelCase_ ( self : str , __lowercase : Tuple ):
        '''simple docstring'''
        __a = load_image(__lowercase )
        __a = torch.IntTensor([[image.height, image.width]] )
        __a = self.image_processor(images=[image] , return_tensors="""pt""" )
        if self.tokenizer is not None:
            __a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
        __a = target_size
        return inputs

    def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ):
        '''simple docstring'''
        __a = model_inputs.pop("""target_size""" )
        __a = self.model(**__lowercase )
        __a = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            __a = model_inputs["""bbox"""]
        return model_outputs

    def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ):
        '''simple docstring'''
        __a = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            __a , __a = target_size[0].tolist()

            def unnormalize(__lowercase : Optional[Any] ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            __a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            __a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            __a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            __a = ["""score""", """label""", """box"""]
            __a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            __a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase )
            __a = raw_annotations[0]
            __a = raw_annotation["""scores"""]
            __a = raw_annotation["""labels"""]
            __a = raw_annotation["""boxes"""]

            __a = scores.tolist()
            __a = [self.model.config.idalabel[label.item()] for label in labels]
            __a = [self._get_bounding_box(__lowercase ) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            __a = ["""score""", """label""", """box"""]
            __a = [
                dict(zip(__lowercase , __lowercase ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]

        return annotation

    def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ):
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
        __a , __a , __a , __a = box.int().tolist()
        __a = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
302
0
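The LayoutLM branch of the pipeline above maps token boxes from the model's 0-1000 coordinate grid back to pixel coordinates. A standalone sketch of that unnormalize step, assuming the usual (x_min, y_min, x_max, y_max) box order; `unnormalize_box` is a hypothetical name used for illustration:

def unnormalize_box(bbox, width, height):
    # the model emits coordinates on a fixed 0-1000 grid; rescale each
    # corner by the true image size to recover pixel coordinates
    return {
        "xmin": int(width * bbox[0] / 1000),
        "ymin": int(height * bbox[1] / 1000),
        "xmax": int(width * bbox[2] / 1000),
        "ymax": int(height * bbox[3] / 1000),
    }

print(unnormalize_box([100, 200, 300, 400], width=800, height=600))
# {'xmin': 80, 'ymin': 120, 'xmax': 240, 'ymax': 240}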
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCAmelCase = logging.get_logger(__name__) # General docstring UpperCAmelCase = '''RegNetConfig''' # Base docstring UpperCAmelCase = '''facebook/regnet-y-040''' UpperCAmelCase = [1, 1_088, 7, 7] # Image classification docstring UpperCAmelCase = '''facebook/regnet-y-040''' UpperCAmelCase = '''tabby, tabby cat''' UpperCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , snake_case__ : int , snake_case__ : int = 3 , snake_case__ : int = 1 , snake_case__ : int = 1 , snake_case__ : Optional[str] = "relu" , **snake_case__ : Tuple , ): '''simple docstring''' super().__init__(**snake_case__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowercase :Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowercase :Dict = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='''VALID''' , groups=snake_case__ , use_bias=snake_case__ , name='''convolution''' , ) lowercase :Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) lowercase :List[Any] = ACTaFN[activation] if activation is not None else tf.identity def __snake_case ( self : str , snake_case__ : Tuple ): '''simple docstring''' lowercase :List[Any] = self.convolution(self.padding(snake_case__ ) ) lowercase :List[Any] = self.normalization(snake_case__ ) lowercase :List[Any] = self.activation(snake_case__ ) return hidden_state class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , snake_case__ : RegNetConfig , **snake_case__ : List[str] ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Union[str, Any] = config.num_channels lowercase :Tuple = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def __snake_case ( self : Optional[int] , snake_case__ : Tuple ): '''simple docstring''' lowercase :Dict = shape_list(snake_case__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowercase :str = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) ) lowercase :Any = self.embedder(snake_case__ ) return hidden_state class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : Tuple , snake_case__ : int , snake_case__ : int = 2 , **snake_case__ : Dict ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :List[Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='''convolution''' ) lowercase :List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def __snake_case ( self : int , snake_case__ : tf.Tensor , snake_case__ : bool = False ): '''simple docstring''' return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ ) class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : Dict , snake_case__ : int , snake_case__ : int , **snake_case__ : Optional[Any] ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='''pooler''' ) lowercase :List[Any] = [ tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def __snake_case ( self : Optional[Any] , snake_case__ : Dict ): '''simple docstring''' lowercase :Any = self.pooler(snake_case__ ) for layer_module in self.attention: lowercase :List[str] = layer_module(snake_case__ ) lowercase :Tuple = hidden_state * pooled return hidden_state class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : Dict , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : Optional[int] ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Tuple = in_channels != out_channels or stride != 1 lowercase :Dict = max(1 , out_channels // config.groups_width ) lowercase :Optional[int] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
lowercase :List[Any] = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='''layer.2''' ), ] lowercase :Union[str, Any] = ACTaFN[config.hidden_act] def __snake_case ( self : int , snake_case__ : Tuple ): '''simple docstring''' lowercase :Dict = hidden_state for layer_module in self.layers: lowercase :Optional[Any] = layer_module(snake_case__ ) lowercase :Optional[int] = self.shortcut(snake_case__ ) hidden_state += residual lowercase :Dict = self.activation(snake_case__ ) return hidden_state class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : List[Any] ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Dict = in_channels != out_channels or stride != 1 lowercase :Any = max(1 , out_channels // config.groups_width ) lowercase :Dict = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) lowercase :Dict = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='''layer.3''' ), ] lowercase :Dict = ACTaFN[config.hidden_act] def __snake_case ( self : int , snake_case__ : List[Any] ): '''simple docstring''' lowercase :Tuple = hidden_state for layer_module in self.layers: lowercase :Any = layer_module(snake_case__ ) lowercase :Dict = self.shortcut(snake_case__ ) hidden_state += residual lowercase :Dict = self.activation(snake_case__ ) return hidden_state class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 2 , **snake_case__ : List[Any] ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :str = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer lowercase :str = [ # downsampling is done in the first layer with stride of 2 layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='''layers.0''' ), *[layer(snake_case__ , snake_case__ , snake_case__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def __snake_case ( self : int , snake_case__ : str ): '''simple docstring''' for layer_module in self.layers: lowercase :str = layer_module(snake_case__ ) return hidden_state class __magic_name__ ( tf.keras.layers.Layer ): def __init__( self : Any , snake_case__ : RegNetConfig , **snake_case__ : int ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Optional[Any] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) lowercase 
:List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=f"""stages.{i+1}""" ) ) def __snake_case ( self : str , snake_case__ : tf.Tensor , snake_case__ : bool = False , snake_case__ : bool = True ): '''simple docstring''' lowercase :Union[str, Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase :Optional[Any] = hidden_states + (hidden_state,) lowercase :Optional[int] = stage_module(snake_case__ ) if output_hidden_states: lowercase :List[str] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) @keras_serializable class __magic_name__ ( tf.keras.layers.Layer ): __A : Optional[Any] = RegNetConfig def __init__( self : List[str] , snake_case__ : Dict , **snake_case__ : int ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Tuple = config lowercase :Optional[Any] = TFRegNetEmbeddings(snake_case__ , name='''embedder''' ) lowercase :int = TFRegNetEncoder(snake_case__ , name='''encoder''' ) lowercase :Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='''pooler''' ) @unpack_inputs def __snake_case ( self : str , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , ): '''simple docstring''' lowercase :int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase :int = return_dict if return_dict is not None else self.config.use_return_dict lowercase :Tuple = self.embedder(snake_case__ , training=snake_case__ ) lowercase :Optional[Any] = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) lowercase :Optional[Any] = encoder_outputs[0] lowercase :Union[str, Any] = self.pooler(snake_case__ ) # Change to NCHW output format have uniformity in the modules lowercase :Tuple = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) lowercase :Any = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowercase :int = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class __magic_name__ ( __UpperCAmelCase ): __A : Optional[Any] = RegNetConfig __A : Any = "regnet" __A : Union[str, Any] = "pixel_values" @property def __snake_case ( self : Tuple ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} UpperCAmelCase = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. 
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' UpperCAmelCase = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , __UpperCAmelCase , ) class __magic_name__ ( __UpperCAmelCase ): def __init__( self : List[Any] , snake_case__ : RegNetConfig , *snake_case__ : Optional[Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) lowercase :Optional[Any] = TFRegNetMainLayer(snake_case__ , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __snake_case ( self : Optional[Any] , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int]=False , ): '''simple docstring''' lowercase :Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase :Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict lowercase :List[Any] = self.regnet( pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , __UpperCAmelCase , ) class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ): def __init__( self : List[str] , snake_case__ : RegNetConfig , *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) lowercase :Tuple = config.num_labels lowercase :Union[str, Any] = TFRegNetMainLayer(snake_case__ , name='''regnet''' ) # classification head lowercase :Optional[int] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __snake_case ( self : Any , snake_case__ : tf.Tensor = None , snake_case__ : tf.Tensor = None , snake_case__ : bool = None , snake_case__ : bool = None , snake_case__ : Union[str, Any]=False , ): '''simple docstring''' lowercase :List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase :Any = return_dict if return_dict is not None else self.config.use_return_dict lowercase :Any = self.regnet( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) lowercase :int = outputs.pooler_output if return_dict else outputs[1] lowercase :Any = self.classifier[0](snake_case__ ) lowercase :Union[str, Any] = self.classifier[1](snake_case__ ) lowercase :int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ ) if not return_dict: lowercase :int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
172
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase (a_ :Optional[int] , a_ :Union[str, Any] , a_ :Optional[Any]=None) -> List[Any]: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match""" lowercase :int = nn.Parameter(a_) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match""" lowercase :Tuple = nn.Parameter(a_) def lowerCamelCase (a_ :int , a_ :Any , a_ :Optional[int]) -> List[Any]: # set torch weights for 1-to-1 comparison lowercase :str = np.asarray(weights[0]) lowercase :List[Any] = np.asarray(weights[1]) lowercase :Optional[int] = np.asarray(weights[2]) set_param( torch_layer.self_attention.query_key , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.self_attention.value , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.output.dense , torch.tensor(a_).view(-1 , a_).contiguous().transpose(0 , 1) , ) def lowerCamelCase (a_ :str , a_ :Any , a_ :Union[str, Any]) -> Dict: # set torch weights for 1-to-1 comparison lowercase :str = np.asarray(weights[0]) lowercase :Dict = np.asarray(weights[1]) lowercase :Dict = np.asarray(weights[2]) lowercase :Optional[Any] = np.asarray(weights[3]) set_param( torch_layer.self_attention.query , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.self_attention.key , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.self_attention.value , torch.tensor(a_).transpose(1 , 2).contiguous().view(-1 , a_) , ) set_param( torch_layer.output.dense , torch.tensor(a_).view(-1 , a_).contiguous().transpose(0 , 1) , ) def lowerCamelCase (a_ :Union[str, Any] , a_ :Dict , a_ :Optional[int]) -> Optional[Any]: # layernorm 1 lowercase :Optional[int] = weights[0][0][0] lowercase :Union[str, Any] = np.asarray(layer_norm_a[0]) lowercase :List[str] = np.asarray(layer_norm_a[1]) set_param( torch_block.attention.layer_norm , torch.tensor(a_) , torch.tensor(a_) , ) # lsh weights + output lowercase :Optional[Any] = weights[0][1] if len(a_) < 4: set_layer_weights_in_torch_lsh(a_ , torch_block.attention , a_) else: set_layer_weights_in_torch_local(a_ , torch_block.attention , a_) # intermediate weighs lowercase :Optional[int] = weights[2][0][1][2] # Chunked Feed Forward if len(a_) == 4: lowercase :int = intermediate_weights[2] # layernorm 2 lowercase :int = np.asarray(intermediate_weights[0][0]) lowercase :Union[str, Any] = np.asarray(intermediate_weights[0][1]) set_param( torch_block.feed_forward.layer_norm , torch.tensor(a_) , torch.tensor(a_) , ) # intermediate dense lowercase :Dict = np.asarray(intermediate_weights[1][0]) lowercase :Optional[Any] = np.asarray(intermediate_weights[1][1]) set_param( torch_block.feed_forward.dense.dense , torch.tensor(a_).transpose(0 , 1).contiguous() , torch.tensor(a_) , ) # intermediate out lowercase :Union[str, Any] = np.asarray(intermediate_weights[4][0]) lowercase :Tuple = np.asarray(intermediate_weights[4][1]) set_param( torch_block.feed_forward.output.dense , torch.tensor(a_).transpose(0 , 1).contiguous() , torch.tensor(a_) , ) def lowerCamelCase (a_ :Tuple , a_ :Dict , a_ :Tuple) -> str: # reformer model lowercase :Union[str, Any] = torch_model.reformer # word embeds 
lowercase :Tuple = np.asarray(weights[1]) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(a_) , ) if isinstance(weights[3] , a_): lowercase :str = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights)): lowercase :List[str] = np.asarray(weights[3][emb_idx][0]) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"""{position_embeddings[emb_idx]} emb does not match""" lowercase :int = nn.Parameter(torch.tensor(a_)) lowercase :Dict = weights[5] assert len(torch_model_reformer.encoder.layers) * 4 == len( a_), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers): lowercase :Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(a_ , a_ , a_) # output layer norm lowercase :Dict = np.asarray(weights[7][0]) lowercase :Optional[Any] = np.asarray(weights[7][1]) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(a_) , torch.tensor(a_) , ) # output embeddings lowercase :str = np.asarray(weights[9][0]) lowercase :Union[str, Any] = np.asarray(weights[9][1]) set_param( torch_model.lm_head.decoder , torch.tensor(a_).transpose(0 , 1).contiguous() , torch.tensor(a_) , ) def lowerCamelCase (a_ :Optional[Any] , a_ :List[Any] , a_ :Tuple) -> Union[str, Any]: # Initialise PyTorch model lowercase :Optional[Any] = ReformerConfig.from_json_file(a_) print(F"""Building PyTorch model from configuration: {config}""") lowercase :Dict = ReformerModelWithLMHead(a_) with open(a_ , '''rb''') as f: lowercase :Tuple = pickle.load(a_)['''weights'''] set_model_weights_in_torch(a_ , a_ , config.hidden_size) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""") torch.save(model.state_dict() , a_) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCAmelCase = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
172
1
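The TF RegNet port above keeps the PyTorch-style NCHW interface and converts layout at the boundaries, because (as its own comment notes) tf.keras Conv2D does not support channels-first on CPU. A minimal sketch of the two permutations it relies on, in isolation:

import tensorflow as tf

x_nchw = tf.random.normal((1, 3, 224, 224))        # PyTorch-style channels-first input
x_nhwc = tf.transpose(x_nchw, perm=(0, 2, 3, 1))   # to channels-last before Conv2D
y_nchw = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))   # back to channels-first on the way out
print(x_nhwc.shape, y_nchw.shape)  # (1, 224, 224, 3) (1, 3, 224, 224)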
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def _lowercase ( lowercase__ ):
    __lowerCAmelCase : List[Any] = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(lowercase__ , lowercase__ )


def _lowercase ( lowercase__ ):
    __lowerCAmelCase : int = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            __lowerCAmelCase : List[str] = s_dict.pop(lowercase__ )
        elif "subsample" in key:
            __lowerCAmelCase : Any = s_dict.pop(lowercase__ )


def _lowercase ( lowercase__ ):
    __lowerCAmelCase, __lowerCAmelCase : List[Any] = emb.weight.shape
    __lowerCAmelCase : int = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
    __lowerCAmelCase : str = emb.weight.data
    return lin_layer


def _lowercase ( lowercase__ , lowercase__ ):
    __lowerCAmelCase : Optional[Any] = torch.load(lowercase__ , map_location='''cpu''' )
    __lowerCAmelCase : int = mam_aaa['''args''']
    __lowerCAmelCase : Optional[Any] = mam_aaa['''model''']
    __lowerCAmelCase : List[str] = state_dict['''decoder.output_projection.weight''']

    remove_ignore_keys_(lowercase__ )
    rename_keys(lowercase__ )

    __lowerCAmelCase : Any = state_dict['''decoder.embed_tokens.weight'''].shape[0]

    __lowerCAmelCase : List[str] = args.share_decoder_input_output_embed

    __lowerCAmelCase : Any = [int(lowercase__ ) for i in args.conv_kernel_sizes.split(''',''' )]
    __lowerCAmelCase : Union[str, Any] = SpeechaTextConfig(
        vocab_size=lowercase__ ,
        max_source_positions=args.max_source_positions ,
        max_target_positions=args.max_target_positions ,
        encoder_layers=args.encoder_layers ,
        decoder_layers=args.decoder_layers ,
        encoder_attention_heads=args.encoder_attention_heads ,
        decoder_attention_heads=args.decoder_attention_heads ,
        encoder_ffn_dim=args.encoder_ffn_embed_dim ,
        decoder_ffn_dim=args.decoder_ffn_embed_dim ,
        d_model=args.encoder_embed_dim ,
        dropout=args.dropout ,
        attention_dropout=args.attention_dropout ,
        activation_dropout=args.activation_dropout ,
        activation_function='''relu''' ,
        num_conv_layers=len(lowercase__ ) ,
        conv_channels=args.conv_channels ,
        conv_kernel_sizes=lowercase__ ,
        input_feat_per_channel=args.input_feat_per_channel ,
        input_channels=args.input_channels ,
        tie_word_embeddings=lowercase__ ,
        num_beams=5 ,
        max_length=2_0_0 ,
        use_cache=lowercase__ ,
        decoder_start_token_id=2 ,
        early_stopping=lowercase__ ,
    )

    __lowerCAmelCase : Tuple = SpeechaTextForConditionalGeneration(lowercase__ )
    __lowerCAmelCase, __lowerCAmelCase : Tuple = model.model.load_state_dict(lowercase__ , strict=lowercase__ )
    if len(lowercase__ ) > 0 and not set(lowercase__ ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f""" but all the following weights are missing {missing}"""
        )

    if tie_embeds:
        __lowerCAmelCase : int = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        __lowerCAmelCase : str = lm_head_weights

    model.save_pretrained(lowercase__ )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    _UpperCamelCase = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
275
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"vocab_file": "spiece.model"} _UpperCamelCase = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } _UpperCamelCase = { "AI-Sweden/gpt-sw3-126m": 2048, "AI-Sweden/gpt-sw3-350m": 2048, "AI-Sweden/gpt-sw3-1.6b": 2048, "AI-Sweden/gpt-sw3-6.7b": 2048, "AI-Sweden/gpt-sw3-20b": 2048, } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ) ->None: '''simple docstring''' __lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCAmelCase : int = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __lowerCAmelCase : Union[str, Any] = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __lowerCAmelCase : str = '''<|endoftext|>''' if eos_token is None else eos_token __lowerCAmelCase : Any = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __lowerCAmelCase : Dict = unk_token if pad_token is None else pad_token __lowerCAmelCase : int = eos_token if bos_token is None else bos_token else: __lowerCAmelCase : Optional[int] = '''<pad>''' if pad_token is None else pad_token __lowerCAmelCase : List[str] = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __lowerCAmelCase : Union[str, Any] = do_lower_case __lowerCAmelCase : Union[str, Any] = remove_space __lowerCAmelCase : int = keep_accents __lowerCAmelCase : Union[str, Any] = vocab_file __lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) # Used for whitespace normalization in input texts # fmt : off __lowerCAmelCase : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __lowerCAmelCase : int = re.compile( f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.__dict__.copy() __lowerCAmelCase : List[Any] = None return state def __setstate__( self , A_ ) ->Tuple: '''simple docstring''' __lowerCAmelCase : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCAmelCase : List[Any] = {} __lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return len(self.sp_model ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : int = self.non_printing_characters_re.sub('''''' , A_ ) # Normalize whitespaces __lowerCAmelCase : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __lowerCAmelCase : Tuple = unicodedata.normalize('''NFC''' , A_ ) return text def UpperCamelCase__ ( self , A_ , **A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : int = self.preprocess_text(A_ ) return self.sp_model.encode(A_ , out_type=A_ ) def UpperCamelCase__ ( self , A_ ) ->int: '''simple docstring''' return self.sp_model.PieceToId(A_ ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.IdToPiece(A_ ) @staticmethod def UpperCamelCase__ ( A_ ) ->str: '''simple docstring''' return out_string def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : str = [] __lowerCAmelCase : Tuple = '''''' __lowerCAmelCase : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : Optional[int] = [] else: current_sub_tokens.append(A_ ) __lowerCAmelCase : str = False out_string += self.sp_model.decode(A_ ) return out_string def UpperCamelCase__ ( self ) ->Dict[str, int]: '''simple docstring''' __lowerCAmelCase : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase : Any = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , '''wb''' ) as fi: __lowerCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]: '''simple docstring''' if isinstance(A_ , A_ ): __lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ ) __lowerCAmelCase : Dict = 
self.sp_model.encode(A_ ) else: __lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text] __lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ ) if return_tensors is True or return_tensors == "pt": __lowerCAmelCase : Tuple = torch.tensor(A_ ) return token_ids def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.decode(A_ ) def UpperCamelCase__ ( self , A_ ) ->List[int]: '''simple docstring''' __lowerCAmelCase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __lowerCAmelCase : Any = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A_ ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=A_ )
275
1
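The fairseq conversion script above ties the LM head to the decoder embeddings when `share_decoder_input_output_embed` is set, by building a bias-free Linear that reuses the embedding matrix. A minimal sketch of that `make_linear_from_emb` idea, using toy sizes rather than the real checkpoint:

import torch
from torch import nn

emb = nn.Embedding(1000, 64)                        # (vocab_size, emb_size)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data               # shared storage: tied weights
print(lm_head.weight.data_ptr() == emb.weight.data_ptr())  # True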
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any]=False ): try: __UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: __UpperCAmelCase = strtobool(snake_case_ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value _lowercase : Optional[int] = parse_flag_from_env('RUN_SLOW', default=False) _lowercase : str = parse_flag_from_env('RUN_REMOTE', default=False) _lowercase : Dict = parse_flag_from_env('RUN_LOCAL', default=True) _lowercase : Optional[int] = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression _lowercase : str = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') _lowercase : str = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') _lowercase : str = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio _lowercase : Dict = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam _lowercase : int = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # Dill-cloudpickle compatibility _lowercase : Union[str, Any] = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows _lowercase : int = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def lowercase__ ( snake_case_ :List[str] ): try: import faiss # noqa except ImportError: __UpperCAmelCase = unittest.skip('''test requires faiss''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :str ): try: import regex # noqa except ImportError: __UpperCAmelCase = unittest.skip('''test requires regex''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :str ): try: import elasticsearch # noqa except ImportError: __UpperCAmelCase = unittest.skip('''test requires elasticsearch''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :Optional[Any] ): try: import sqlalchemy # noqa except ImportError: __UpperCAmelCase = unittest.skip('''test requires sqlalchemy''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :Optional[Any] ): if not config.TORCH_AVAILABLE: __UpperCAmelCase = unittest.skip('''test requires PyTorch''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :str ): if not config.TF_AVAILABLE: __UpperCAmelCase = unittest.skip('''test requires TensorFlow''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :List[Any] ): if not config.JAX_AVAILABLE: 
__UpperCAmelCase = unittest.skip('''test requires JAX''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :Any ): if not config.PIL_AVAILABLE: __UpperCAmelCase = unittest.skip('''test requires Pillow''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :int ): try: import transformers # noqa F401 except ImportError: return unittest.skip('''test requires transformers''' )(snake_case_ ) else: return test_case def lowercase__ ( snake_case_ :List[Any] ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip('''test requires tiktoken''' )(snake_case_ ) else: return test_case def lowercase__ ( snake_case_ :int ): try: import spacy # noqa F401 except ImportError: return unittest.skip('''test requires spacy''' )(snake_case_ ) else: return test_case def lowercase__ ( snake_case_ :str ): def _require_spacy_model(snake_case_ :Optional[int] ): try: import spacy # noqa F401 spacy.load(snake_case_ ) except ImportError: return unittest.skip('''test requires spacy''' )(snake_case_ ) except OSError: return unittest.skip('''test requires spacy model \'{}\''''.format(snake_case_ ) )(snake_case_ ) else: return test_case return _require_spacy_model def lowercase__ ( snake_case_ :Any ): try: import pyspark # noqa F401 except ImportError: return unittest.skip('''test requires pyspark''' )(snake_case_ ) else: return test_case def lowercase__ ( snake_case_ :Dict ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip('''test requires joblibspark''' )(snake_case_ ) else: return test_case def lowercase__ ( snake_case_ :Optional[Any] ): if not _run_slow_tests or _run_slow_tests == 0: __UpperCAmelCase = unittest.skip('''test is slow''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :int ): if not _run_local_tests or _run_local_tests == 0: __UpperCAmelCase = unittest.skip('''test is local''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :int ): if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCAmelCase = unittest.skip('''test is packaged''' )(snake_case_ ) return test_case def lowercase__ ( snake_case_ :Any ): if not _run_remote_tests or _run_remote_tests == 0: __UpperCAmelCase = unittest.skip('''test requires remote''' )(snake_case_ ) return test_case def lowercase__ ( *snake_case_ :Tuple ): def decorate(cls :Optional[Any] ): for name, fn in cls.__dict__.items(): if callable(snake_case_ ) and name.startswith('''test''' ): for decorator in decorators: __UpperCAmelCase = decorator(snake_case_ ) setattr(cls , snake_case_ , snake_case_ ) return cls return decorate class _UpperCAmelCase ( _lowerCAmelCase ): pass class _UpperCAmelCase ( _lowerCAmelCase ): a__ : int = 0 a__ : Union[str, Any] = 1 a__ : List[str] = 2 @contextmanager def lowercase__ ( snake_case_ :str=OfflineSimulationMode.CONNECTION_FAILS , snake_case_ :List[str]=1E-16 ): __UpperCAmelCase = requests.Session().request def timeout_request(snake_case_ :Any , snake_case_ :Any , snake_case_ :Tuple , **snake_case_ :Union[str, Any] ): # Change the url to an invalid url so that the connection hangs __UpperCAmelCase = '''https://10.255.255.1''' if kwargs.get('''timeout''' ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. 
Please set a timeout.''' ) __UpperCAmelCase = timeout try: return online_request(snake_case_ , snake_case_ , **snake_case_ ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCAmelCase = url __UpperCAmelCase = e.args[0] __UpperCAmelCase = (max_retry_error.args[0].replace('''10.255.255.1''' , F'''OfflineMock[{url}]''' ),) __UpperCAmelCase = (max_retry_error,) raise def raise_connection_error(snake_case_ :int , snake_case_ :Union[str, Any] , **snake_case_ :str ): raise requests.ConnectionError('''Offline mode is enabled.''' , request=snake_case_ ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('''requests.Session.send''' , snake_case_ ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('''requests.Session.request''' , snake_case_ ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('''datasets.config.HF_DATASETS_OFFLINE''' , snake_case_ ): yield else: raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' ) @contextmanager def lowercase__ ( *snake_case_ :Tuple , **snake_case_ :Optional[Any] ): __UpperCAmelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*snake_case_ , **snake_case_ ) as tmp_dir: try: os.chdir(snake_case_ ) yield finally: os.chdir(snake_case_ ) @contextmanager def lowercase__ ( ): import gc gc.collect() __UpperCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase__ ( ): import gc gc.collect() __UpperCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): return deepcopy(snake_case_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(snake_case_ ).integers(0 , 100 , 10 ).tolist() def lowercase__ ( snake_case_ :str ): import decorator from requests.exceptions import HTTPError def _wrapper(snake_case_ :List[Any] , *snake_case_ :int , **snake_case_ :Optional[int] ): try: return func(*snake_case_ , **snake_case_ ) except HTTPError as err: if str(snake_case_ ).startswith('''500''' ) or str(snake_case_ ).startswith('''502''' ): pytest.xfail(str(snake_case_ ) ) raise err return decorator.decorator(_wrapper , snake_case_ ) class _UpperCAmelCase : def __init__( self : Optional[Any] , _lowercase : str , _lowercase : Dict , _lowercase : str ): __UpperCAmelCase = returncode __UpperCAmelCase = stdout __UpperCAmelCase = stderr async def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :Optional[int] ): while True: __UpperCAmelCase = await stream.readline() if line: callback(snake_case_ ) else: break async def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Dict=None , snake_case_ :Tuple=None , snake_case_ :Optional[int]=None , snake_case_ :str=False , snake_case_ :Optional[Any]=False ): if echo: print('''\nRunning: ''' , ''' '''.join(snake_case_ ) ) __UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=snake_case_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=snake_case_ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCAmelCase = [] __UpperCAmelCase = [] def tee(snake_case_ :Optional[int] , snake_case_ :List[Any] , snake_case_ :Tuple , snake_case_ :List[str]="" ): __UpperCAmelCase = line.decode('''utf-8''' ).rstrip() sink.append(snake_case_ ) if not quiet: print(snake_case_ , snake_case_ , file=snake_case_ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda snake_case_ : tee(snake_case_ , snake_case_ , sys.stdout , label='''stdout:''' ) ), _read_stream(p.stderr , lambda snake_case_ : tee(snake_case_ , snake_case_ , sys.stderr , label='''stderr:''' ) ), ] , timeout=snake_case_ , ) return _RunOutput(await p.wait() , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :str , snake_case_ :Optional[Any]=None , snake_case_ :Any=None , snake_case_ :Any=180 , snake_case_ :Optional[int]=False , snake_case_ :int=True ): __UpperCAmelCase = asyncio.get_event_loop() __UpperCAmelCase = loop.run_until_complete( _stream_subprocess(snake_case_ , env=snake_case_ , stdin=snake_case_ , timeout=snake_case_ , quiet=snake_case_ , echo=snake_case_ ) ) __UpperCAmelCase = ''' '''.join(snake_case_ ) if result.returncode > 0: __UpperCAmelCase = '''\n'''.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def lowercase__ ( ): __UpperCAmelCase = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' ) __UpperCAmelCase = re.sub(r'''^gw''' , '''''' , snake_case_ , 0 , re.M ) return int(snake_case_ ) def lowercase__ ( ): __UpperCAmelCase = 29_500 __UpperCAmelCase = pytest_xdist_worker_id() return port + uniq_delta
86
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
86
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __UpperCamelCase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: SCREAMING_SNAKE_CASE_ : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: SCREAMING_SNAKE_CASE_ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def __A ( self ) -> Tuple: SCREAMING_SNAKE_CASE = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}] ) SCREAMING_SNAKE_CASE = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}], [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}], ] , ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] ) # Legacy behavior SCREAMING_SNAKE_CASE = text_classifier('This is great !' , return_all_scores=lowerCAmelCase__ ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' , return_all_scores=lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}]] ) SCREAMING_SNAKE_CASE = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}], [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}], ] , ) SCREAMING_SNAKE_CASE = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ {'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_0', 'score': 0.5_04}, ] , ) @require_torch def __A ( self ) -> List[Any]: import torch SCREAMING_SNAKE_CASE = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] ) @require_tf def __A ( self ) -> Any: SCREAMING_SNAKE_CASE = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] ) @slow @require_torch def __A ( self ) -> Dict: SCREAMING_SNAKE_CASE = pipeline('text-classification' ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) SCREAMING_SNAKE_CASE = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) SCREAMING_SNAKE_CASE = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 0.9_88}] ) @slow @require_tf def __A ( self ) -> Any: SCREAMING_SNAKE_CASE = pipeline('text-classification' , framework='tf' ) SCREAMING_SNAKE_CASE = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) SCREAMING_SNAKE_CASE = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) SCREAMING_SNAKE_CASE = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 0.9_88}] ) def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: SCREAMING_SNAKE_CASE = TextClassificationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: SCREAMING_SNAKE_CASE = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 SCREAMING_SNAKE_CASE = 'HuggingFace is in' SCREAMING_SNAKE_CASE = text_classifier(lowerCAmelCase__ ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) SCREAMING_SNAKE_CASE = ['HuggingFace is in ', 'Paris is in France'] SCREAMING_SNAKE_CASE = text_classifier(lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}, {'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format SCREAMING_SNAKE_CASE = text_classifier(lowerCAmelCase__ , top_k=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [[{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] * N, [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] * N] , ) SCREAMING_SNAKE_CASE = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'} SCREAMING_SNAKE_CASE = text_classifier(lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , {'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )} , ) self.assertTrue(outputs['label'] in 
model.config.idalabel.values() ) # This might be used as a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was producing wrong outputs. SCREAMING_SNAKE_CASE = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(lowerCAmelCase__ ): text_classifier(lowerCAmelCase__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility SCREAMING_SNAKE_CASE = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
113
"""simple docstring""" from math import isclose, sqrt def lowercase (SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> tuple[float, float, float]: SCREAMING_SNAKE_CASE = point_y / 4 / point_x SCREAMING_SNAKE_CASE = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) SCREAMING_SNAKE_CASE = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) SCREAMING_SNAKE_CASE = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 SCREAMING_SNAKE_CASE = outgoing_gradient**2 + 4 SCREAMING_SNAKE_CASE = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) SCREAMING_SNAKE_CASE = (point_y - outgoing_gradient * point_x) ** 2 - 1_00 SCREAMING_SNAKE_CASE = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) SCREAMING_SNAKE_CASE = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point SCREAMING_SNAKE_CASE = x_minus if isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else x_plus SCREAMING_SNAKE_CASE = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def lowercase (SCREAMING_SNAKE_CASE_ : float = 1.4 , SCREAMING_SNAKE_CASE_ : float = -9.6 ) -> int: SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = first_x_coord SCREAMING_SNAKE_CASE = first_y_coord SCREAMING_SNAKE_CASE = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next_point(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
113
1
"""simple docstring""" from __future__ import annotations from statistics import mean def lowerCamelCase (a_ :list[int] , a_ :list[int] , a_ :int) -> list[int]: lowercase :Dict = [0] * no_of_processes lowercase :List[Any] = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(_snake_case): lowercase :Any = burst_time[i] lowercase :list[int] = [] lowercase :List[Any] = 0 lowercase :Any = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: lowercase :List[str] = [] lowercase :List[str] = -1 for i in range(_snake_case): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(_snake_case) if len(_snake_case) > 0: lowercase :List[Any] = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: lowercase :Union[str, Any] = i total_time += burst_time[target_process] completed += 1 lowercase :str = 0 lowercase :List[str] = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def lowerCamelCase (a_ :list[int] , a_ :int , a_ :list[int]) -> list[int]: lowercase :int = [0] * no_of_processes for i in range(_snake_case): lowercase :int = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print('''[TEST CASE 01]''') UpperCAmelCase = 4 UpperCAmelCase = [2, 5, 3, 7] UpperCAmelCase = [0, 0, 0, 0] UpperCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes) UpperCAmelCase = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''') for i, process_id in enumerate(list(range(1, 5))): print( F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
369
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCAmelCase = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
172
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A_ ( __UpperCAmelCase ): _UpperCAmelCase : Any = '''naver-clova-ix/donut-base-finetuned-docvqa''' _UpperCAmelCase : Any = ( '''This is a tool that answers a question about a document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) _UpperCAmelCase : Optional[int] = '''document_qa''' _UpperCAmelCase : List[str] = AutoProcessor _UpperCAmelCase : Optional[Any] = VisionEncoderDecoderModel _UpperCAmelCase : Optional[Any] = ['''image''', '''text'''] _UpperCAmelCase : int = ['''text'''] def __init__( self : str ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : str): if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.') super().__init__(*UpperCamelCase__ ,**UpperCamelCase__) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : List[Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' __lowerCamelCase : Union[str, Any] = task_prompt.replace('{user_input}' ,UpperCamelCase__) __lowerCamelCase : int = self.pre_processor.tokenizer( UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ ,return_tensors='pt').input_ids __lowerCamelCase : Tuple = self.pre_processor(UpperCamelCase__ ,return_tensors='pt').pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]): return self.model.generate( inputs['pixel_values'].to(self.device) ,decoder_input_ids=inputs['decoder_input_ids'].to(self.device) ,max_length=self.model.decoder.config.max_position_embeddings ,early_stopping=UpperCamelCase__ ,pad_token_id=self.pre_processor.tokenizer.pad_token_id ,eos_token_id=self.pre_processor.tokenizer.eos_token_id ,use_cache=UpperCamelCase__ ,num_beams=1 ,bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,return_dict_in_generate=UpperCamelCase__ ,).sequences def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any): __lowerCamelCase : Any = self.pre_processor.batch_decode(UpperCamelCase__)[0] __lowerCamelCase : Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.eos_token ,'') __lowerCamelCase : str = sequence.replace(self.pre_processor.tokenizer.pad_token ,'') __lowerCamelCase : Tuple = re.sub(R'<.*?>' ,'' ,UpperCamelCase__ ,count=1).strip() # remove first task start token __lowerCamelCase : Any = self.pre_processor.tokenajson(UpperCamelCase__) return sequence["answer"]
73
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A ) lowerCAmelCase_ = flatten_dict(_A ) return flax_params def __UpperCamelCase ( _A ): lowerCAmelCase_ = {} lowerCAmelCase_ = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCAmelCase_ = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCAmelCase_ = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = flax_dict[key] lowerCAmelCase_ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T ) else: lowerCAmelCase_ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __UpperCamelCase ( _A , _A , _A=False , _A=False ): lowerCAmelCase_ = get_flax_param(_A ) if not use_large: lowerCAmelCase_ = PixaStructVisionConfig() lowerCAmelCase_ = PixaStructTextConfig() else: lowerCAmelCase_ = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowerCAmelCase_ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A ) lowerCAmelCase_ = PixaStructForConditionalGeneration(_A ) lowerCAmelCase_ = rename_and_convert_flax_params(_A ) model.load_state_dict(_A ) lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) 
lowerCAmelCase_ = PixaStructImageProcessor() lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A ) if use_large: lowerCAmelCase_ = 4096 lowerCAmelCase_ = True # mkdir if needed os.makedirs(_A , exist_ok=_A ) model.save_pretrained(_A ) processor.save_pretrained(_A ) print('''Model saved in {}'''.format(_A ) ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') _A = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
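# Example invocation (illustrative only; the flag names come from the argparse
# definition above, the script name and paths are placeholders):
# python convert_pix2struct_checkpoint.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --pytorch_dump_folder_path /path/to/output \
#     --use_large
# Note that argparse registers `--t5x_checkpoint_path` (attribute
# `t5x_checkpoint_path`) while the call site reads `args.tax_checkpoint_path`,
# so those identifiers would need to agree for the script to run as written.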
278
0
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _lowercase ( __snake_case ) -> Optional[int]: __lowerCAmelCase : str = np.inf def set_batch_size(__snake_case ) -> None: nonlocal batch_size if isinstance(__snake_case ,__snake_case ): __lowerCAmelCase : Any = min(__snake_case ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__snake_case ,__snake_case ): __lowerCAmelCase : Union[str, Any] = min(__snake_case ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__snake_case ,__snake_case ) and feature.dtype == "binary": __lowerCAmelCase : str = min(__snake_case ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__snake_case ,__snake_case ) return None if batch_size is np.inf else batch_size class A__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self: int , _SCREAMING_SNAKE_CASE: NestedDataStructureLike[PathLike] , _SCREAMING_SNAKE_CASE: Optional[NamedSplit] = None , _SCREAMING_SNAKE_CASE: Optional[Features] = None , _SCREAMING_SNAKE_CASE: str = None , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: Optional[int] = None , **_SCREAMING_SNAKE_CASE: List[str] , ) -> str: """simple docstring""" super().__init__( _SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) else {self.split: path_or_paths} __lowerCAmelCase : int = _PACKAGED_DATASETS_MODULES["parquet"][1] __lowerCAmelCase : Any = Parquet( cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) def _SCREAMING_SNAKE_CASE ( self: int) -> int: """simple docstring""" if self.streaming: __lowerCAmelCase : int = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: __lowerCAmelCase : str = None __lowerCAmelCase : Any = None __lowerCAmelCase : List[Any] = None __lowerCAmelCase : Optional[int] = None self.builder.download_and_prepare( download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , ) __lowerCAmelCase : List[str] = self.builder.as_dataset( split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory) return dataset class A__ : '''simple docstring''' def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dataset , _SCREAMING_SNAKE_CASE: Union[PathLike, BinaryIO] , _SCREAMING_SNAKE_CASE: Optional[int] = None , **_SCREAMING_SNAKE_CASE: Tuple , ) -> int: """simple docstring""" __lowerCAmelCase : Optional[Any] = dataset __lowerCAmelCase : Optional[int] = path_or_buf __lowerCAmelCase : int = batch_size or get_writer_batch_size(dataset.features) __lowerCAmelCase 
: str = parquet_writer_kwargs def _SCREAMING_SNAKE_CASE ( self: str) -> int: """simple docstring""" __lowerCAmelCase : int = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , "wb+") as buffer: __lowerCAmelCase : int = self._write(file_obj=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs) else: __lowerCAmelCase : List[Any] = self._write(file_obj=self.path_or_buf , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs) return written def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: BinaryIO , _SCREAMING_SNAKE_CASE: int , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> int: """simple docstring""" __lowerCAmelCase : str = 0 __lowerCAmelCase : Dict = parquet_writer_kwargs.pop("path_or_buf" , _SCREAMING_SNAKE_CASE) __lowerCAmelCase : Optional[int] = self.dataset.features.arrow_schema __lowerCAmelCase : Any = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) for offset in logging.tqdm( range(0 , len(self.dataset) , _SCREAMING_SNAKE_CASE) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): __lowerCAmelCase : List[Any] = query_table( table=self.dataset._data , key=slice(_SCREAMING_SNAKE_CASE , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(_SCREAMING_SNAKE_CASE) written += batch.nbytes writer.close() return written
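# Editorial note (added commentary): `_write` streams the dataset to parquet in
# slices of `batch_size` rows -- `query_table` materializes one slice at a time
# and `writer.write_table` appends it as a row group, so peak memory stays
# roughly one batch regardless of dataset size. The batch size itself comes
# from `get_writer_batch_size` at the top of the file, which shrinks the
# row-group size whenever the schema contains Image, Audio, or binary features
# so that individual row groups remain cheap to load back.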
58
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class A__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __lt__( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Dict: """simple docstring""" return self[-1] < other[-1] def __eq__( self: int , _SCREAMING_SNAKE_CASE: Any) -> Tuple: """simple docstring""" return self[-1] == other[-1] def _lowercase ( __snake_case ) -> list: __lowerCAmelCase : list[Stack] = [] # sort into stacks for element in collection: __lowerCAmelCase : Dict = Stack([element] ) __lowerCAmelCase : str = bisect_left(__snake_case ,__snake_case ) if i != len(__snake_case ): stacks[i].append(__snake_case ) else: stacks.append(__snake_case ) # use a heap-based merge to merge stack efficiently __lowerCAmelCase : Union[str, Any] = merge(*(reversed(__snake_case ) for stack in stacks) ) return collection if __name__ == "__main__": __snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip() __snake_case : Optional[int] = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
58
1
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : List[Any] = logging.get_logger(__name__) _lowerCamelCase : int = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class UpperCamelCase_ ( _lowercase ): '''simple docstring''' UpperCAmelCase__ = "efficientnet" def __init__( self : Optional[Any] , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = 2.0 , UpperCAmelCase__ : float = 3.1 , UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase__ : List[int] = [] , UpperCAmelCase__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase__ : float = 0.25 , UpperCAmelCase__ : str = "swish" , UpperCAmelCase__ : int = 2_560 , UpperCAmelCase__ : str = "mean" , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : float = 0.001 , UpperCAmelCase__ : float = 0.99 , UpperCAmelCase__ : float = 0.5 , UpperCAmelCase__ : float = 0.2 , **UpperCAmelCase__ : str , ) ->str: '''simple docstring''' super().__init__(**__lowerCAmelCase) A__ = num_channels A__ = image_size A__ = width_coefficient A__ = depth_coefficient A__ = depth_divisor A__ = kernel_sizes A__ = in_channels A__ = out_channels A__ = depthwise_padding A__ = strides A__ = num_block_repeats A__ = expand_ratios A__ = squeeze_expansion_ratio A__ = hidden_act A__ = hidden_dim A__ = pooling_type A__ = initializer_range A__ = batch_norm_eps A__ = batch_norm_momentum A__ = dropout_rate A__ = drop_connect_rate A__ = sum(__lowerCAmelCase) * 4 class UpperCamelCase_ ( _lowercase ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ]) @property def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: '''simple docstring''' return 1e-5
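# Editorial note (added commentary): `width_coefficient` and `depth_coefficient`
# are EfficientNet's compound-scaling multipliers, uniformly widening channel
# counts and deepening block repeats relative to the B0 baseline. The defaults
# above (2.0 and 3.1 with `image_size=600`) match the B7 variant named in the
# pretrained-config map at the top of the file.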
14
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
0
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ = logging.get_logger(__name__) class snake_case__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" lowerCamelCase = """AutoTokenizer""" lowerCamelCase = ["""tokenizer"""] lowerCamelCase = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Any=None ) -> int: """simple docstring""" super().__init__(UpperCamelCase__ ) snake_case : int = speaker_embeddings @classmethod def lowerCAmelCase ( cls : int , UpperCamelCase__ : int , UpperCamelCase__ : Any="speaker_embeddings_path.json" , **UpperCamelCase__ : str ) -> int: """simple docstring""" if speaker_embeddings_dict_path is not None: snake_case : List[Any] = get_file_from_repo( UpperCamelCase__ , UpperCamelCase__ , subfolder=kwargs.pop('''subfolder''' , UpperCamelCase__ ) , cache_dir=kwargs.pop('''cache_dir''' , UpperCamelCase__ ) , force_download=kwargs.pop('''force_download''' , UpperCamelCase__ ) , proxies=kwargs.pop('''proxies''' , UpperCamelCase__ ) , resume_download=kwargs.pop('''resume_download''' , UpperCamelCase__ ) , local_files_only=kwargs.pop('''local_files_only''' , UpperCamelCase__ ) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCamelCase__ ) , revision=kwargs.pop('''revision''' , UpperCamelCase__ ) , ) if speaker_embeddings_path is None: logger.warning( f'`{os.path.join(UpperCamelCase__ , UpperCamelCase__ )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.'
) snake_case : Tuple = None else: with open(UpperCamelCase__ ) as speaker_embeddings_json: snake_case : Dict = json.load(UpperCamelCase__ ) else: snake_case : str = None snake_case : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) return cls(tokenizer=UpperCamelCase__ , speaker_embeddings=UpperCamelCase__ ) def lowerCAmelCase ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str="speaker_embeddings_path.json" , UpperCamelCase__ : List[Any]="speaker_embeddings" , UpperCamelCase__ : bool = False , **UpperCamelCase__ : List[str] , ) -> Any: """simple docstring""" if self.speaker_embeddings is not None: os.makedirs(os.path.join(UpperCamelCase__ , UpperCamelCase__ , '''v2''' ) , exist_ok=UpperCamelCase__ ) snake_case : List[Any] = {} snake_case : Optional[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": snake_case : Optional[Any] = self._load_voice_preset(UpperCamelCase__ ) snake_case : Optional[int] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , UpperCamelCase__ , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=UpperCamelCase__ , ) snake_case : Dict = os.path.join(UpperCamelCase__ , f'{prompt_key}_{key}.npy' ) snake_case : List[str] = tmp_dict with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as fp: json.dump(UpperCamelCase__ , UpperCamelCase__ ) super().save_pretrained(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def lowerCAmelCase ( self : int , UpperCamelCase__ : str = None , **UpperCamelCase__ : int ) -> Union[str, Any]: """simple docstring""" snake_case : List[str] = self.speaker_embeddings[voice_preset] snake_case : Union[str, Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' ) snake_case : Optional[int] = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , UpperCamelCase__ ) , cache_dir=kwargs.pop('''cache_dir''' , UpperCamelCase__ ) , force_download=kwargs.pop('''force_download''' , UpperCamelCase__ ) , proxies=kwargs.pop('''proxies''' , UpperCamelCase__ ) , resume_download=kwargs.pop('''resume_download''' , UpperCamelCase__ ) , local_files_only=kwargs.pop('''local_files_only''' , UpperCamelCase__ ) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCamelCase__ ) , revision=kwargs.pop('''revision''' , UpperCamelCase__ ) , ) if path is None: raise ValueError( f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) snake_case : Tuple = np.load(UpperCamelCase__ ) return voice_preset_dict def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : Optional[dict] = None ) -> str: """simple docstring""" for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.'
) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__( self : int , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Dict="pt" , UpperCamelCase__ : Union[str, Any]=256 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , **UpperCamelCase__ : Union[str, Any] , ) -> Optional[int]: """simple docstring""" if voice_preset is not None and not isinstance(UpperCamelCase__ , UpperCamelCase__ ): if ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): snake_case : str = self._load_voice_preset(UpperCamelCase__ ) else: if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not voice_preset.endswith('''.npz''' ): snake_case : int = voice_preset + '''.npz''' snake_case : Tuple = np.load(UpperCamelCase__ ) if voice_preset is not None: self._validate_voice_preset_dict(UpperCamelCase__ , **UpperCamelCase__ ) snake_case : Tuple = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ ) snake_case : Union[str, Any] = self.tokenizer( UpperCamelCase__ , return_tensors=UpperCamelCase__ , padding='''max_length''' , max_length=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , ) if voice_preset is not None: snake_case : Optional[int] = voice_preset return encoded_text
83
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class snake_case__ ( unittest.TestCase ): """simple docstring""" lowerCamelCase = StableDiffusionLDMaDPipeline lowerCamelCase = TEXT_TO_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) snake_case : Dict = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) snake_case : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case : int = CLIPTextModel(UpperCamelCase__ ) snake_case : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case : str = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCAmelCase ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ) -> Optional[int]: """simple docstring""" if str(UpperCamelCase__ ).startswith('''mps''' ): snake_case : Optional[int] = torch.manual_seed(UpperCamelCase__ ) else: snake_case : Optional[int] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) snake_case : Tuple = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowerCAmelCase ( self : Any ) -> str: """simple docstring""" snake_case : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case : List[Any] = self.get_dummy_components() snake_case : str = StableDiffusionLDMaDPipeline(**UpperCamelCase__ ) snake_case : Union[str, Any] = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : List[str] = self.get_dummy_inputs(UpperCamelCase__ ) snake_case : Tuple = ldmad_pipe(**UpperCamelCase__ ) snake_case ,snake_case : int = output.rgb, output.depth snake_case : str = rgb[0, -3:, -3:, -1] snake_case : Union[str, Any] = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) 
assert depth.shape == (1, 64, 64) snake_case : int = np.array( [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] ) snake_case : str = np.array([103.46_727, 85.812_004, 87.849_236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2 def lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" snake_case : int = self.get_dummy_components() snake_case : Any = StableDiffusionLDMaDPipeline(**UpperCamelCase__ ) snake_case : Optional[Any] = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : int = self.get_dummy_inputs(UpperCamelCase__ ) snake_case : str = 3 * [inputs['''prompt''']] # forward snake_case : Union[str, Any] = ldmad_pipe(**UpperCamelCase__ ) snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth snake_case : Tuple = rgb_slice_a[0, -3:, -3:, -1] snake_case : List[str] = depth_slice_a[0, -3:, -1] snake_case : int = self.get_dummy_inputs(UpperCamelCase__ ) snake_case : Optional[int] = 3 * [inputs.pop('''prompt''' )] snake_case : Dict = ldmad_pipe.tokenizer( UpperCamelCase__ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='''pt''' , ) snake_case : Optional[Any] = text_inputs['''input_ids'''].to(UpperCamelCase__ ) snake_case : Any = ldmad_pipe.text_encoder(UpperCamelCase__ )[0] snake_case : Tuple = prompt_embeds # forward snake_case : List[Any] = ldmad_pipe(**UpperCamelCase__ ) snake_case ,snake_case : Dict = output.rgb, output.depth snake_case : Any = rgb_slice_a[0, -3:, -3:, -1] snake_case : Dict = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4 def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" snake_case : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case : Dict = self.get_dummy_components() snake_case : List[str] = PNDMScheduler(skip_prk_steps=UpperCamelCase__ ) snake_case : Optional[Any] = StableDiffusionLDMaDPipeline(**UpperCamelCase__ ) snake_case : Dict = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : Dict = self.get_dummy_inputs(UpperCamelCase__ ) snake_case : str = '''french fries''' snake_case : List[str] = ldmad_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ ) snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth snake_case : Union[str, Any] = rgb[0, -3:, -3:, -1] snake_case : int = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) snake_case : Dict = np.array( [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] ) snake_case : Any = np.array([107.84_738, 84.62_802, 89.962_135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2 @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]="cpu" , UpperCamelCase__ : 
Optional[int]=torch.floataa , UpperCamelCase__ : Union[str, Any]=0 ) -> Any: """simple docstring""" snake_case : Any = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) snake_case : Optional[Any] = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 64, 64) ) snake_case : Any = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) snake_case : List[Any] = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" snake_case : Dict = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ) snake_case : Optional[Any] = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : Any = self.get_inputs(UpperCamelCase__ ) snake_case : str = ldmad_pipe(**UpperCamelCase__ ) snake_case ,snake_case : List[Any] = output.rgb, output.depth snake_case : Optional[Any] = rgb[0, -3:, -3:, -1].flatten() snake_case : Tuple = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) snake_case : Optional[Any] = np.array( [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] ) snake_case : str = np.array( [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3 @nightly @require_torch_gpu class snake_case__ ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any="cpu" , UpperCamelCase__ : Optional[int]=torch.floataa , UpperCamelCase__ : Optional[int]=0 ) -> str: """simple docstring""" snake_case : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) snake_case : Optional[Any] = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 64, 64) ) snake_case : int = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) snake_case : List[str] = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 50, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" snake_case : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : Dict = self.get_inputs(UpperCamelCase__ ) snake_case : str = ldmad_pipe(**UpperCamelCase__ ) snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth snake_case : Union[str, Any] = 0.495_586 snake_case : Tuple = 0.33_795_515 snake_case : Dict = 112.48_518 snake_case : Optional[int] = 98.489_746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3 def 
lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" snake_case : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) snake_case : int = self.get_inputs(UpperCamelCase__ ) snake_case : List[Any] = ldmad_pipe(**UpperCamelCase__ ) snake_case ,snake_case : Union[str, Any] = output.rgb, output.depth snake_case : Tuple = 0.4_194_127 snake_case : Optional[Any] = 0.35_375_586 snake_case : Any = 0.5_638_502 snake_case : int = 0.34_686_103 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3
83
1
"""simple docstring""" from manim import * class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def snake_case ( self ): __lowerCAmelCase = Rectangle(height=0.5 , width=0.5 ) __lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) __lowerCAmelCase = [mem.copy() for i in range(6 )] __lowerCAmelCase = [mem.copy() for i in range(6 )] __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 ) __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 ) __lowerCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 ) __lowerCAmelCase = Text("CPU" , font_size=24 ) __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__a ) __lowerCAmelCase = [mem.copy() for i in range(4 )] __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 ) __lowerCAmelCase = Text("GPU" , font_size=24 ) __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) gpu.move_to([-1, -1, 0] ) self.add(__a ) __lowerCAmelCase = [mem.copy() for i in range(6 )] __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 ) __lowerCAmelCase = Text("Model" , font_size=24 ) __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) model.move_to([3, -1.0, 0] ) self.add(__a ) __lowerCAmelCase = [] for i, rect in enumerate(__a ): rect.set_stroke(__a ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) __lowerCAmelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__a ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 ) self.add(__a ) cpu_targs.append(__a ) __lowerCAmelCase = [mem.copy() for i in range(6 )] __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 ) __lowerCAmelCase = Text("Loaded Checkpoint" , font_size=24 ) __lowerCAmelCase = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) __lowerCAmelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __lowerCAmelCase = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__a , __a ) __lowerCAmelCase = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) __lowerCAmelCase = MarkupText( f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__a ) , Write(__a ) ) self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) ) __lowerCAmelCase = [] __lowerCAmelCase = [] for i, rect in enumerate(__a ): __lowerCAmelCase = fill.copy().set_fill(__a , opacity=0.7 ) target.move_to(__a ) first_animations.append(GrowFromCenter(__a , run_time=1 ) ) __lowerCAmelCase = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__a , run_time=1.5 ) ) self.play(*__a ) self.play(*__a ) self.wait()
57
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase_ = logging.get_logger(__name__) class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""pixel_values"""] def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = size if size is not None else {"""shortest_edge""": 2_24} UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""" ) UpperCamelCase__ = do_resize UpperCamelCase__ = size UpperCamelCase__ = crop_pct UpperCamelCase__ = resample UpperCamelCase__ = do_center_crop UpperCamelCase__ = crop_size UpperCamelCase__ = do_rescale UpperCamelCase__ = rescale_factor UpperCamelCase__ = do_normalize UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCamelCase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}" ) if crop_pct is not None: if "shortest_edge" in size: UpperCamelCase__ = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: UpperCamelCase__ = int(size["""height"""] / crop_pct ) else: UpperCamelCase__ = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) else: if "shortest_edge" in size: UpperCamelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE_ ) elif "height" in size and "width" in size: UpperCamelCase__ = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(SCREAMING_SNAKE_CASE_ ) ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize UpperCamelCase__ = crop_pct if crop_pct is not None else self.crop_pct UpperCamelCase__ = resample if resample is not None else self.resample UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean UpperCamelCase__ = image_std if image_std is not None else self.image_std UpperCamelCase__ = size if size is not None else self.size UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , 
default_to_square=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""" ) UpperCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_pct is None: raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. UpperCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: UpperCamelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: UpperCamelCase__ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: UpperCamelCase__ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: UpperCamelCase__ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] UpperCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] UpperCamelCase__ = {"""pixel_values""": images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
244
0
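The image processor above implements the standard crop_pct trick: resize to a slightly larger size, then center-crop back down, so the crop discards border pixels rather than content. A quick sketch of the arithmetic with the class defaults (shortest_edge=224, crop_pct=0.9); note that int() truncates, matching the code:

crop_pct = 0.9
shortest_edge = 224

# Resize target before the center crop, as in the resize() method above.
resize_edge = int(shortest_edge / crop_pct)  # 224 / 0.9 = 248.88... -> 248

print(resize_edge)        # 248: the short side after resizing
print(224 / resize_edge)  # ~0.903: the effective fraction kept by the 224x224 crop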
"""simple docstring""" import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""", datefmt="""%Y-%m-%d %H:%M:%S""", level=os.environ.get("""LOGLEVEL""", """INFO""").upper(), stream=sys.stdout, ) _a = logging.getLogger(__name__) _a = {"""facebook/bart-base""": BartForConditionalGeneration} _a = {"""facebook/bart-base""": BartTokenizer} def lowerCamelCase__ ( ) -> str: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' ) parser.add_argument( '''--validation_file''', type=__snake_case, default=__snake_case, help='''A csv or a json file containing the validation data.''' ) parser.add_argument( '''--max_length''', type=__snake_case, default=5, help='''The maximum total input sequence length after tokenization.''', ) parser.add_argument( '''--num_beams''', type=__snake_case, default=__snake_case, help=( '''Number of beams to use for evaluation. This argument will be ''' '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.''' ), ) parser.add_argument( '''--model_name_or_path''', type=__snake_case, help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=__snake_case, ) parser.add_argument( '''--config_name''', type=__snake_case, default=__snake_case, help='''Pretrained config name or path if not the same as model_name''', ) parser.add_argument( '''--device''', type=__snake_case, default='''cpu''', help='''Device where the model will be run''', ) parser.add_argument('''--output_file_path''', type=__snake_case, default=__snake_case, help='''Where to store the final ONNX file.''' ) _UpperCamelCase = parser.parse_args() return args def lowerCamelCase__ ( __snake_case, __snake_case="cpu" ) -> Any: """simple docstring""" _UpperCamelCase = model_dict[model_name].from_pretrained(__snake_case ).to(__snake_case ) _UpperCamelCase = tokenizer_dict[model_name].from_pretrained(__snake_case ) if model_name in ["facebook/bart-base"]: _UpperCamelCase = 0 _UpperCamelCase = None _UpperCamelCase = 0 return huggingface_model, tokenizer def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> List[Any]: """simple docstring""" model.eval() _UpperCamelCase = None _UpperCamelCase = torch.jit.script(BARTBeamSearchGenerator(__snake_case ) ) with torch.no_grad(): _UpperCamelCase = '''My friends are cool but they eat too many carbs.''' _UpperCamelCase = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=10_24, return_tensors='''pt''' ).to(model.device ) _UpperCamelCase = model.generate( inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=__snake_case, max_length=__snake_case, early_stopping=__snake_case, decoder_start_token_id=model.config.decoder_start_token_id, ) torch.onnx.export( __snake_case, ( inputs['''input_ids'''], inputs['''attention_mask'''], num_beams, max_length, model.config.decoder_start_token_id, ), __snake_case, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={ '''input_ids''': {0: '''batch''', 1: 
'''seq'''}, '''output_ids''': {0: '''batch''', 1: '''seq_out'''}, }, example_outputs=__snake_case, ) logger.info('''Model exported to {}'''.format(__snake_case ) ) _UpperCamelCase = remove_dup_initializers(os.path.abspath(__snake_case ) ) logger.info('''Deduplicated and optimized model written to {}'''.format(__snake_case ) ) _UpperCamelCase = onnxruntime.InferenceSession(__snake_case ) _UpperCamelCase = ort_sess.run( __snake_case, { '''input_ids''': inputs['''input_ids'''].cpu().numpy(), '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(), '''num_beams''': np.array(__snake_case ), '''max_length''': np.array(__snake_case ), '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ), }, ) np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3 ) logger.info('''Model outputs from torch and ONNX Runtime are similar.''' ) logger.info('''Success.''' ) def lowerCamelCase__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase = parse_args() _UpperCamelCase = 5 _UpperCamelCase = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _UpperCamelCase = torch.device(args.device ) _UpperCamelCase , _UpperCamelCase = load_model_tokenizer(args.model_name_or_path, __snake_case ) if model.config.decoder_start_token_id is None: raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' ) model.to(__snake_case ) if args.max_length: _UpperCamelCase = args.max_length if args.num_beams: _UpperCamelCase = args.num_beams if args.output_file_path: _UpperCamelCase = args.output_file_path else: _UpperCamelCase = '''BART.onnx''' logger.info('''Exporting model to ONNX''' ) export_and_validate_model(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) if __name__ == "__main__": main()
369
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _a = namedtuple( """_TestCommandArgs""", [ """dataset""", """name""", """cache_dir""", """data_dir""", """all_configs""", """save_infos""", """ignore_verifications""", """force_redownload""", """clear_cache""", ], defaults=[None, None, None, False, False, False, False, False], ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[int]: """simple docstring""" return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = _TestCommandArgs(dataset=__snake_case, all_configs=__snake_case, save_infos=__snake_case ) _UpperCamelCase = TestCommand(*__snake_case ) test_command.run() _UpperCamelCase = os.path.join(__snake_case, '''README.md''' ) assert os.path.exists(__snake_case ) _UpperCamelCase = DatasetInfosDict.from_directory(__snake_case ) _UpperCamelCase = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ), splits=[ { '''name''': '''train''', '''num_bytes''': 2_35_15_63, '''num_examples''': 1_00_00, }, { '''name''': '''validation''', '''num_bytes''': 23_84_18, '''num_examples''': 10_00, }, ], download_size=3_94_06_80, dataset_size=2_58_99_81, ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: _UpperCamelCase , _UpperCamelCase = getattr(dataset_infos['''default'''], __snake_case ), getattr(expected_dataset_infos['''default'''], __snake_case ) if key == "num_bytes": assert is_apercent_close(__snake_case, __snake_case ) elif key == "splits": assert list(__snake_case ) == list(__snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes ) else: result == expected
100
0
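The export script above checks the exported graph by feeding the same inputs to onnxruntime and comparing against the PyTorch output with np.testing.assert_allclose. A stripped-down sketch of that round trip with a toy linear model (the file name and toy model are placeholders; this assumes torch and onnxruntime are installed):

import numpy as np
import onnxruntime
import torch

model = torch.nn.Linear(4, 2)
model.eval()
example = torch.randn(1, 4)

# Export with a fixed opset, as the script does (it also uses opset 14).
torch.onnx.export(model, (example,), "toy.onnx", input_names=["x"], output_names=["y"], opset_version=14)

with torch.no_grad():
    torch_out = model(example).numpy()

sess = onnxruntime.InferenceSession("toy.onnx", providers=["CPUExecutionProvider"])
(ort_out,) = sess.run(None, {"x": example.numpy()})

# Close, not bit-identical: the same tolerance pattern as the script.
np.testing.assert_allclose(torch_out, ort_out, rtol=1e-3, atol=1e-3)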
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Union[str, Any] = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='blenderbot-small' lowerCamelCase__ =['past_key_values'] lowerCamelCase__ ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self , a_=5_02_65 , a_=5_12 , a_=8 , a_=20_48 , a_=16 , a_=8 , a_=20_48 , a_=16 , a_=0.0 , a_=0.0 , a_=True , a_=True , a_="gelu" , a_=5_12 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.02 , a_=1 , a_=False , a_=0 , a_=1 , a_=2 , a_=2 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : Any = max_position_embeddings __snake_case : Tuple = d_model __snake_case : str = encoder_ffn_dim __snake_case : Optional[int] = encoder_layers __snake_case : int = encoder_attention_heads __snake_case : Optional[Any] = decoder_ffn_dim __snake_case : List[str] = decoder_layers __snake_case : List[str] = decoder_attention_heads __snake_case : Union[str, Any] = dropout __snake_case : str = attention_dropout __snake_case : List[Any] = activation_dropout __snake_case : Union[str, Any] = activation_function __snake_case : List[Any] = init_std __snake_case : List[str] = encoder_layerdrop __snake_case : List[Any] = decoder_layerdrop __snake_case : Optional[int] = use_cache __snake_case : Optional[Any] = encoder_layers __snake_case : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , forced_eos_token_id=a_ , **a_ , ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __snake_case : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __snake_case : Optional[Any] = {0: '''batch'''} __snake_case : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __snake_case : Dict = {0: '''batch''', 1: '''decoder_sequence'''} __snake_case : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(a_ , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__snake_case : str = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: __snake_case , __snake_case : Tuple = self.num_layers for i in range(a_ ): __snake_case : int = {0: '''batch''', 2: '''past_sequence + sequence'''} __snake_case : int = {0: '''batch''', 2: '''past_sequence + sequence'''} else: __snake_case : Dict = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __snake_case : str = super().outputs else: __snake_case : List[Any] = super(a_ , self ).outputs if self.use_past: __snake_case , __snake_case : int = self.num_layers for i in range(a_ ): __snake_case : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''} __snake_case : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): '''simple docstring''' __snake_case : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , a_ , a_ , a_ , a_ ) # Generate decoder inputs __snake_case : str = seq_length if not self.use_past else 1 __snake_case : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} __snake_case : Dict = dict(**a_ , **a_ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __snake_case , __snake_case : int = common_inputs['''input_ids'''].shape __snake_case : Union[str, Any] = common_inputs['''decoder_input_ids'''].shape[1] __snake_case , __snake_case : Union[str, Any] = self.num_attention_heads __snake_case : Dict = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case : Tuple = decoder_seq_length + 3 __snake_case : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __snake_case : Optional[Any] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(a_ , a_ )] , dim=1 ) __snake_case : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __snake_case , __snake_case : str = self.num_layers __snake_case : Dict = min(a_ , a_ ) __snake_case : List[str] = max(a_ , a_ ) - min_num_layers __snake_case : Optional[int] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(a_ ): common_inputs["past_key_values"].append( ( torch.zeros(a_ ), torch.zeros(a_ ), torch.zeros(a_ ), torch.zeros(a_ ), ) ) # TODO: test this. 
__snake_case : Optional[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(a_ , a_ ): common_inputs["past_key_values"].append((torch.zeros(a_ ), torch.zeros(a_ )) ) return common_inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): '''simple docstring''' __snake_case : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , a_ , a_ , a_ , a_ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __snake_case , __snake_case : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __snake_case : Optional[int] = seqlen + 2 __snake_case , __snake_case : Dict = self.num_layers __snake_case , __snake_case : Optional[Any] = self.num_attention_heads __snake_case : Optional[int] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case : str = common_inputs['''attention_mask'''].dtype __snake_case : Optional[int] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 ) __snake_case : List[str] = [ (torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(a_ ) ] return common_inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): '''simple docstring''' __snake_case : Dict = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __snake_case : List[Any] = tokenizer.num_special_tokens_to_add(a_ ) __snake_case : Union[str, Any] = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ ) # Generate dummy inputs according to compute batch and sequence __snake_case : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size __snake_case : Union[str, Any] = dict(tokenizer(a_ , return_tensors=a_ ) ) return common_inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __snake_case : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) elif self.task == "causal-lm": __snake_case : str = self._generate_dummy_inputs_for_causal_lm( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) else: __snake_case : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) return common_inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __snake_case : Tuple = super()._flatten_past_key_values_(a_ , a_ , a_ , a_ ) else: __snake_case : str = super(a_ , self )._flatten_past_key_values_( a_ , a_ , a_ , a_ )
102
"""simple docstring""" from __future__ import annotations from scipy.special import comb # type: ignore class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : List[str] = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. __snake_case : Optional[Any] = len(a_ ) - 1 def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __snake_case : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , a_ ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(a_ ) , 5 ) == 1 return output_values def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __snake_case : List[str] = self.basis_function(a_ ) __snake_case : str = 0.0 __snake_case : Union[str, Any] = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def SCREAMING_SNAKE_CASE (self , a_ = 0.01 ): '''simple docstring''' from matplotlib import pyplot as plt # type: ignore __snake_case : list[float] = [] # x coordinates of points to plot __snake_case : list[float] = [] # y coordinates of points to plot __snake_case : int = 0.0 while t <= 1: __snake_case : Union[str, Any] = self.bezier_curve_function(a_ ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size __snake_case : List[Any] = [i[0] for i in self.list_of_points] __snake_case : Any = [i[1] for i in self.list_of_points] plt.plot( a_ , a_ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , ) plt.scatter(a_ , a_ , color='''red''' , label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
102
1
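The basis_function in the Bezier class above is the Bernstein basis, which is why the code asserts that the values sum to 1 (a partition of unity). A quick worked check for a quadratic curve (degree 2) at t = 0.5, using math.comb to stay dependency-free:

from math import comb

degree, t = 2, 0.5
basis = [comb(degree, i) * (1 - t) ** (degree - i) * t**i for i in range(degree + 1)]

print(basis)  # [0.25, 0.5, 0.25]
assert round(sum(basis), 5) == 1  # the same sanity check the class performs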
'''simple docstring''' import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor __snake_case = logging.get_logger(__name__) class lowercase ( PerceiverImageProcessor ): """simple docstring""" def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' warnings.warn( '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
350
'''simple docstring''' from math import ceil def a ( __a , __a ) -> Any: '''simple docstring''' UpperCamelCase__ :str = list(range(0 , __a ) ) UpperCamelCase__ :Optional[int] = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check UpperCamelCase__ :Optional[int] = [] for i in device_map_blocks: if device_map_blocks.count(__a ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(__a ) # Missing blocks UpperCamelCase__ :List[str] = [i for i in blocks if i not in device_map_blocks] UpperCamelCase__ :Optional[Any] = [i for i in device_map_blocks if i not in blocks] if len(__a ) != 0: raise ValueError( '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.''' ''' These attention blocks were specified more than once: ''' + str(__a ) ) if len(__a ) != 0: raise ValueError( '''There are attention blocks for this model that are not specified in the device_map. Add these attention ''' '''blocks to a device on the device_map: ''' + str(__a ) ) if len(__a ) != 0: raise ValueError( '''The device_map contains more attention blocks than this model has. Remove these from the device_map:''' + str(__a ) ) def a ( __a , __a ) -> Tuple: '''simple docstring''' UpperCamelCase__ :Optional[Any] = list(range(__a ) ) UpperCamelCase__ :Any = int(ceil(n_layers / len(__a ) ) ) UpperCamelCase__ :List[Any] = [layers[i : i + n_blocks] for i in range(0 , __a , __a )] return dict(zip(__a , __a ) )
219
0
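The second helper in the row above builds a device map by slicing the layer indices into contiguous, ceil-sized blocks, one per device. A minimal restatement of that logic with concrete numbers (12 layers over 4 devices) to make the block boundaries visible:

from math import ceil

def split_layers(devices, n_layers):
    # Contiguous chunks of size ceil(n_layers / n_devices), zipped onto devices.
    layers = list(range(n_layers))
    n_blocks = ceil(n_layers / len(devices))
    blocks = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, blocks))

print(split_layers([0, 1, 2, 3], 12))
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}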
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( lowercase , unittest.TestCase ): UpperCAmelCase : Dict = GPTSanJapaneseTokenizer UpperCAmelCase : Dict = False UpperCAmelCase : Union[str, Any] = {"""do_clean_text""": False, """add_prefix_space""": False} def _lowercase (self : str) -> str: super().setUp() # fmt: off __snake_case : Any = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on __snake_case : int = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 __snake_case : Union[str, Any] = {'unk_token': '<unk>'} __snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) with open(self.emoji_file , 'w') as emoji_writer: emoji_writer.write(json.dumps(_A)) def _lowercase (self : List[Any] , **_A : Union[str, Any]) -> int: kwargs.update(self.special_tokens_map) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_A) def _lowercase (self : List[str] , _A : Tuple) -> Tuple: __snake_case : List[str] = 'こんにちは、世界。 \nこんばんは、㔺界。😀' __snake_case : Dict = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def _lowercase (self : int , _A : Dict) -> Any: __snake_case , __snake_case : List[Any] = self.get_input_output_texts(_A) __snake_case : str = tokenizer.encode(_A , add_special_tokens=_A) __snake_case : int = tokenizer.decode(_A , clean_up_tokenization_spaces=_A) return text, ids def _lowercase (self : str) -> Tuple: pass # TODO add if relevant def _lowercase (self : int) -> Union[str, Any]: pass # TODO add if relevant def _lowercase (self : List[str]) -> List[Any]: pass # TODO add if relevant def _lowercase (self : Union[str, Any]) -> List[str]: __snake_case : Any = self.get_tokenizer() # Testing tokenization __snake_case : Optional[int] = 'こんにちは、世界。 こんばんは、㔺界。' __snake_case : str = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] __snake_case : str = tokenizer.tokenize(_A) self.assertListEqual(_A , _A) # Testing conversion to ids without special tokens __snake_case : Any = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] __snake_case : List[Any] = tokenizer.convert_tokens_to_ids(_A) self.assertListEqual(_A , _A) # Testing conversion to ids with special tokens __snake_case : Optional[int] = tokens + [tokenizer.unk_token] __snake_case : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] __snake_case : Any = tokenizer.convert_tokens_to_ids(_A) self.assertListEqual(_A , _A) def _lowercase (self : Optional[int]) -> Optional[Any]: __snake_case : Tuple = self.get_tokenizer() # Testing tokenization __snake_case : Any = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' __snake_case : Optional[Any] = 'こんにちは、、、、世界。こんばんは、、、、世界。' __snake_case : List[Any] = tokenizer.encode(_A) __snake_case : Optional[Any] = tokenizer.decode(_A) self.assertEqual(_A , _A) @slow def _lowercase (self : int) -> Optional[int]: __snake_case : Optional[Any] = 
self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') # Testing tokenization __snake_case : str = 'こんにちは、世界。' __snake_case : Dict = 'こんばんは、㔺界。😀' __snake_case : List[str] = 'こんにちは、世界。こんばんは、世界。😀' __snake_case : Dict = tokenizer.encode(prefix_text + input_text) __snake_case : List[str] = tokenizer.encode('' , prefix_text=prefix_text + input_text) __snake_case : int = tokenizer.encode(_A , prefix_text=_A) __snake_case : Dict = tokenizer.decode(_A) __snake_case : Union[str, Any] = tokenizer.decode(_A) __snake_case : List[Any] = tokenizer.decode(_A) self.assertEqual(_A , _A) self.assertEqual(_A , _A) self.assertEqual(_A , _A) @slow def _lowercase (self : Optional[Any]) -> int: __snake_case : Dict = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') # Testing tokenization __snake_case : List[str] = 'こんにちは、世界。' __snake_case : Dict = 'こんばんは、㔺界。😀' __snake_case : int = len(tokenizer.encode(_A)) - 2 __snake_case : str = len(tokenizer.encode(_A)) - 2 __snake_case : List[str] = [1] + [0] * (len_prefix + len_text + 1) __snake_case : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0] __snake_case : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1) __snake_case : Dict = tokenizer(prefix_text + input_text).token_type_ids __snake_case : Optional[int] = tokenizer('' , prefix_text=prefix_text + input_text).token_type_ids __snake_case : str = tokenizer(_A , prefix_text=_A).token_type_ids self.assertListEqual(_A , _A) self.assertListEqual(_A , _A) self.assertListEqual(_A , _A) @slow def _lowercase (self : int) -> Optional[Any]: __snake_case : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') __snake_case : int = tokenizer.encode('あンいワ') __snake_case : Dict = tokenizer.encode('' , prefix_text='あンいワ') __snake_case : Dict = tokenizer.encode('いワ' , prefix_text='あン') self.assertEqual(tokenizer.decode(_A) , tokenizer.decode(_A)) self.assertEqual(tokenizer.decode(_A) , tokenizer.decode(_A)) self.assertNotEqual(_A , _A) self.assertNotEqual(_A , _A) self.assertEqual(x_token_a[1] , x_token_a[-1]) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3]) # SEG token @slow def _lowercase (self : Tuple) -> List[str]: __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') __snake_case : Dict = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] __snake_case : List[Any] = tokenizer(_A , padding=_A) __snake_case : List[str] = tokenizer.batch_encode_plus(_A , padding=_A) # fmt: off __snake_case : List[Any] = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] __snake_case : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] __snake_case : List[str] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , _A) self.assertListEqual(x_token.token_type_ids , _A) self.assertListEqual(x_token.attention_mask , _A) self.assertListEqual(x_token_a.input_ids , _A) self.assertListEqual(x_token_a.token_type_ids , _A) self.assertListEqual(x_token_a.attention_mask , _A) def _lowercase (self : Any) -> int: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def _lowercase (self : List[str]) -> Tuple: # tokenizer has no padding token pass
172
"""simple docstring""" def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = [] __snake_case : Optional[Any] = set({'(', '[', '{'} ) __snake_case : Union[str, Any] = set({')', ']', '}'} ) __snake_case : Tuple = {'{': '}', '[': ']', '(': ')'} for i in range(len(UpperCAmelCase_ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(UpperCAmelCase_ ) == 0 or (len(UpperCAmelCase_ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(UpperCAmelCase_ ) == 0 def __UpperCAmelCase ( ) -> Any: '''simple docstring''' __snake_case : Optional[Any] = input('Enter sequence of brackets: ' ) if is_balanced(UpperCAmelCase_ ): print(UpperCAmelCase_ , 'is balanced' ) else: print(UpperCAmelCase_ , 'is not balanced' ) if __name__ == "__main__": main()
172
1
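The bracket checker in the row above is the classic stack algorithm: push every opener, pop and match on every closer, and require an empty stack at the end. A compact runnable restatement with a few sanity checks showing why a stack (and not a simple counter) is needed:

def is_balanced(s):
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert is_balanced("{[()]}")
assert not is_balanced("([)]")  # balanced counts, wrong nesting order
assert not is_balanced("(((")   # leftover openers must also fail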
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : Optional[Any] = logging.get_logger(__name__) snake_case : Union[str, Any] = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class _snake_case ( snake_case ): UpperCamelCase__ = 'transfo-xl' UpperCamelCase__ = ['mems'] UpperCamelCase__ = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _a=267_735 , _a=[20_000, 40_000, 200_000] , _a=1_024 , _a=1_024 , _a=16 , _a=64 , _a=4_096 , _a=4 , _a=False , _a=18 , _a=1_600 , _a=1_000 , _a=True , _a=True , _a=0 , _a=-1 , _a=True , _a=0.1 , _a=0.0 , _a=True , _a="normal" , _a=0.01 , _a=0.01 , _a=0.02 , _a=1e-5 , _a=0 , **_a , ): __magic_name__ : List[Any] = vocab_size __magic_name__ : Dict = [] self.cutoffs.extend(_a ) if proj_share_all_but_first: __magic_name__ : List[str] = [False] + [True] * len(self.cutoffs ) else: __magic_name__ : Optional[Any] = [False] + [False] * len(self.cutoffs ) __magic_name__ : Optional[int] = d_model __magic_name__ : str = d_embed __magic_name__ : Optional[Any] = d_head __magic_name__ : Optional[int] = d_inner __magic_name__ : List[str] = div_val __magic_name__ : List[str] = pre_lnorm __magic_name__ : Union[str, Any] = n_layer __magic_name__ : Optional[int] = n_head __magic_name__ : str = mem_len __magic_name__ : int = same_length __magic_name__ : Dict = attn_type __magic_name__ : int = clamp_len __magic_name__ : Optional[int] = sample_softmax __magic_name__ : List[Any] = adaptive __magic_name__ : Optional[int] = dropout __magic_name__ : Optional[int] = dropatt __magic_name__ : Optional[Any] = untie_r __magic_name__ : List[str] = init __magic_name__ : Any = init_range __magic_name__ : Optional[int] = proj_init_std __magic_name__ : List[Any] = init_std __magic_name__ : List[Any] = layer_norm_epsilon super().__init__(eos_token_id=_a , **_a ) @property def SCREAMING_SNAKE_CASE ( self ): # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def SCREAMING_SNAKE_CASE ( self , _a ): # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
41
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : Optional[Any] = logging.get_logger(__name__) snake_case : Union[str, Any] = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class _snake_case ( snake_case ): UpperCamelCase__ = 'transfo-xl' UpperCamelCase__ = ['mems'] UpperCamelCase__ = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _a=267_735 , _a=[20_000, 40_000, 200_000] , _a=1_024 , _a=1_024 , _a=16 , _a=64 , _a=4_096 , _a=4 , _a=False , _a=18 , _a=1_600 , _a=1_000 , _a=True , _a=True , _a=0 , _a=-1 , _a=True , _a=0.1 , _a=0.0 , _a=True , _a="normal" , _a=0.01 , _a=0.01 , _a=0.02 , _a=1e-5 , _a=0 , **_a , ): __magic_name__ : List[Any] = vocab_size __magic_name__ : Dict = [] self.cutoffs.extend(_a ) if proj_share_all_but_first: __magic_name__ : List[str] = [False] + [True] * len(self.cutoffs ) else: __magic_name__ : Optional[Any] = [False] + [False] * len(self.cutoffs ) __magic_name__ : Optional[int] = d_model __magic_name__ : str = d_embed __magic_name__ : Optional[Any] = d_head __magic_name__ : Optional[int] = d_inner __magic_name__ : List[str] = div_val __magic_name__ : List[str] = pre_lnorm __magic_name__ : Union[str, Any] = n_layer __magic_name__ : Optional[int] = n_head __magic_name__ : str = mem_len __magic_name__ : int = same_length __magic_name__ : Dict = attn_type __magic_name__ : int = clamp_len __magic_name__ : Optional[int] = sample_softmax __magic_name__ : List[Any] = adaptive __magic_name__ : Optional[int] = dropout __magic_name__ : Optional[int] = dropatt __magic_name__ : Optional[Any] = untie_r __magic_name__ : List[str] = init __magic_name__ : Any = init_range __magic_name__ : Optional[int] = proj_init_std __magic_name__ : List[Any] = init_std __magic_name__ : List[Any] = layer_norm_epsilon super().__init__(eos_token_id=_a , **_a ) @property def SCREAMING_SNAKE_CASE ( self ): # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def SCREAMING_SNAKE_CASE ( self , _a ): # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
41
1
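In the Transfo-XL config above, cutoffs split the vocabulary into frequency bands for the adaptive softmax, and proj_share_all_but_first decides which bands share a projection. A small illustration of how the defaults partition the 267,735-token vocabulary (band boundaries only, no model code involved):

vocab_size = 267_735
cutoffs = [20_000, 40_000, 200_000]

bounds = [0] + cutoffs + [vocab_size]
print(list(zip(bounds[:-1], bounds[1:])))
# [(0, 20000), (20000, 40000), (40000, 200000), (200000, 267735)]

# proj_share_all_but_first=True reproduces the flag list built in __init__:
print([False] + [True] * len(cutoffs))  # [False, True, True, True]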
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("""dataclasses""") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("""importlib_metadata""") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py') def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None ): require_version(deps[pkg] , _UpperCamelCase )
86
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = BarthezTokenizer A_ : Tuple = BarthezTokenizerFast A_ : Dict = True A_ : List[str] = True def __lowerCamelCase ( self ): super().setUp() __lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = tokenizer def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = '<pad>' __lowerCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 ) def __lowerCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2] __lowerCAmelCase : Optional[int] = self.tokenizer( _SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) __lowerCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): if not self.test_rust_tokenizer: return __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.' 
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # fmt: off __lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __lowerCAmelCase : Union[str, Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
86
1
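The startup check in the row above verifies installed package versions against pinned specs before anything else imports. A rough sketch of what a require_version-style helper does under the hood, using importlib.metadata and the packaging library (the real implementation handles more operators and richer error hints; this is an approximation, not its actual code):

from importlib.metadata import version

from packaging.specifiers import SpecifierSet

def require_version(requirement):
    # Split "numpy>=1.17" into the distribution name and the specifier part.
    for i, ch in enumerate(requirement):
        if ch in "<>=!~":
            name, spec = requirement[:i], requirement[i:]
            break
    else:
        raise ValueError(f"no version specifier found in {requirement!r}")
    installed = version(name)
    if installed not in SpecifierSet(spec):
        raise ImportError(f"{name}=={installed} is installed, but {requirement} is required")

require_version("numpy>=1.17")  # passes silently on any reasonably recent numpy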
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness a__ = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' a__ = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' a__ = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' a__ = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' a__ = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> Optional[Any]: return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , ) def __lowercase ( self , _a , _a , _a=[1, 1_0, 1_0_0] , _a=4 , _a=3.0 ) -> Tuple: if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('''This metric is currently not supported on Windows.''' ) with ThreadPoolExecutor(max_workers=_a ) as executor: _a : Dict = [] _a : str = Counter() _a : Any = 0 _a : List[str] = defaultdict(_a ) for task_id, (candidates, test_case) in enumerate(zip(_a , _a ) ): for candidate in candidates: _a : Any = candidate + '''\n''' + test_case _a : Optional[Any] = (test_program, timeout, task_id, completion_id[task_id]) _a : Dict = executor.submit(_a , *_a ) futures.append(_a ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_a ): _a : Any = future.result() results[result["task_id"]].append((result['''completion_id'''], result) ) _a , _a : Dict = [], [] for result in results.values(): result.sort() _a : str = [r[1]['''passed'''] for r in result] total.append(len(_a ) ) correct.append(sum(_a ) ) _a : str = np.array(_a ) _a : int = np.array(_a ) _a : List[str] = k _a : Dict = {F"""pass@{k}""": estimate_pass_at_k(_a , _a , _a ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def __UpperCAmelCase ( __a : Tuple ,__a : List[str] ,__a : Any ) -> Tuple: """simple docstring""" def estimator(__a : int ,__a : int ,__a : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 
- k / np.arange(n - c + 1 ,n + 1 ) ) if isinstance(__a ,__a ): _a : List[str] = itertools.repeat(__a ,len(__a ) ) else: assert len(__a ) == len(__a ) _a : Optional[Any] = iter(__a ) return np.array([estimator(int(__a ) ,int(__a ) ,__a ) for n, c in zip(__a ,__a )] )
15
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch a__ = logging.get_logger(__name__) @add_end_docstrings( __lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self , _a ) -> np.ndarray: if self.framework == "tf": _a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __lowercase ( self , _a ) -> np.ndarray: _a : int = self.get_masked_index(_a ) _a : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def __lowercase ( self , _a ) -> Optional[int]: if isinstance(_a , _a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_a ) def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]: if return_tensors is None: _a : Union[str, Any] = self.framework _a : str = self.tokenizer(_a , return_tensors=_a ) self.ensure_exactly_one_mask_token(_a ) return model_inputs def __lowercase ( self , _a ) -> Optional[Any]: _a : List[str] = self.model(**_a ) _a : Any = model_inputs['''input_ids'''] return model_outputs def __lowercase ( self , _a , _a=5 , _a=None ) -> str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _a : List[Any] = target_ids.shape[0] _a : Any = model_outputs['''input_ids'''][0] _a : List[str] = model_outputs['''logits'''] if self.framework == "tf": _a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _a : List[str] = outputs.numpy() _a : Dict = outputs[0, masked_index, :] _a : str = stable_softmax(_a , axis=-1 ) if target_ids is not None: _a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) ) _a : Union[str, Any] = tf.expand_dims(_a , 0 ) _a : Optional[int] = tf.math.top_k(_a , k=_a ) _a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: _a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _a : List[str] = outputs[0, masked_index, :] _a : List[Any] = logits.softmax(dim=-1 ) if target_ids is not None: _a : List[Any] = probs[..., target_ids] _a , _a : Optional[Any] = probs.topk(_a ) _a : Dict = [] _a : List[Any] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _a : Optional[Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this 
array in place _a : Optional[int] = input_ids.numpy().copy() if target_ids is not None: _a : Tuple = target_ids[p].tolist() _a : List[str] = p # Filter padding out: _a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a ) _a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(_a ) result.append(_a ) if single_mask: return result[0] return result def __lowercase ( self , _a , _a=None ) -> Dict: if isinstance(_a , _a ): _a : Tuple = [targets] try: _a : int = self.tokenizer.get_vocab() except Exception: _a : Any = {} _a : List[Any] = [] for target in targets: _a : List[Any] = vocab.get(_a , _a ) if id_ is None: _a : Tuple = self.tokenizer( _a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids'''] if len(_a ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ '''We cannot replace it with anything meaningful, ignoring it''' ) continue _a : Tuple = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) _a : List[str] = list(set(_a ) ) if len(_a ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) _a : int = np.array(_a ) return target_ids def __lowercase ( self , _a=None , _a=None ) -> Tuple: _a : str = {} if targets is not None: _a : List[Any] = self.get_target_ids(_a , _a ) _a : Optional[Any] = target_ids if top_k is not None: _a : Union[str, Any] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , _a , *_a , **_a ) -> int: _a : Optional[Any] = super().__call__(_a , **_a ) if isinstance(_a , _a ) and len(_a ) == 1: return outputs[0] return outputs
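A minimal usage sketch for the fill-mask pipeline implemented above, via the standard transformers pipeline factory; the checkpoint name is illustrative. The top_k and targets keywords are routed to the postprocess and target-resolution hooks defined in the class:

from transformers import pipeline

# Build a fill-mask pipeline; the class above backs the "fill-mask" task.
unmasker = pipeline("fill-mask", model="bert-base-uncased")

# Plain top-k prediction for the single [MASK] token.
print(unmasker("Paris is the [MASK] of France.", top_k=3))

# Restrict scoring to explicit target tokens; the target-resolution helper above maps
# them to vocab ids, warning and falling back to the first sub-token when absent.
print(unmasker("Paris is the [MASK] of France.", targets=["capital", "center"]))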
15
1
import re import string import numpy as np import datasets SCREAMING_SNAKE_CASE :Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' SCREAMING_SNAKE_CASE :Tuple = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regular expressions matching characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' SCREAMING_SNAKE_CASE :str = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Value("string" ,id="sequence" ), "references": datasets.Value("string" ,id="sequence" ), } ) ,reference_urls=[]
,) def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : List[Any] ,A : Any=None ,A : List[Any]=False ,A : Any=False ,A : Dict=False ,): if regexes_to_ignore is not None: for s in regexes_to_ignore: __A = np.array([re.sub(A ,"" ,A ) for x in predictions] ) __A = np.array([re.sub(A ,"" ,A ) for x in references] ) else: __A = np.asarray(A ) __A = np.asarray(A ) if ignore_case: __A = np.char.lower(A ) __A = np.char.lower(A ) if ignore_punctuation: __A = string.punctuation.maketrans("" ,"" ,string.punctuation ) __A = np.char.translate(A ,table=A ) __A = np.char.translate(A ,table=A ) if ignore_numbers: __A = string.digits.maketrans("" ,"" ,string.digits ) __A = np.char.translate(A ,table=A ) __A = np.char.translate(A ,table=A ) __A = predictions == references return {"exact_match": np.mean(A ) * 1_00}
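A short sketch exercising the normalization flags handled above (load_metric is the legacy datasets entry point shown in the docstring examples):

import datasets

exact_match = datasets.load_metric("exact_match")
refs = ["Agent 007", "the cat"]
preds = ["agent 7", "cat"]

# Case, punctuation and digits are all stripped before comparison,
# so "Agent 007" and "agent 7" both normalize to "agent ".
results = exact_match.compute(
    references=refs,
    predictions=preds,
    ignore_case=True,
    ignore_punctuation=True,
    ignore_numbers=True,
)
print(results["exact_match"])  # 50.0 -- "the cat" vs "cat" still differs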
15
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = {"configuration_mmbt": ["MMBTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
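The _LazyModule registration above defers the heavy torch-backed imports until first attribute access; a small sketch of the effect (the dotted module path is an assumption inferred from the four-dot relative imports, and has moved across transformers versions):

import importlib

# Importing the package module only evaluates _import_structure, not the modeling code.
mmbt = importlib.import_module("transformers.models.deprecated.mmbt")  # path is an assumption

# Attribute access triggers the real import of configuration_mmbt via _LazyModule.__getattr__.
print(mmbt.MMBTConfig)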
211
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ): for attribute in key.split("." ): snake_case : str = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: snake_case : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: snake_case : str = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case : Optional[int] = value elif weight_type == "weight_g": snake_case : List[Any] = value elif weight_type == "weight_v": snake_case : Union[str, Any] = value elif weight_type == "bias": snake_case : Optional[int] = value else: snake_case : str = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ): snake_case : Dict = [] snake_case : Optional[int] = fairseq_model.state_dict() snake_case : Optional[int] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): snake_case : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) snake_case : str = True else: for key, mapped_key in MAPPING.items(): snake_case : Tuple = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned): snake_case : Optional[Any] = True if "*" in mapped_key: snake_case : Dict = name.split(_lowerCAmelCase )[0].split("." 
)[-2] snake_case : Any = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: snake_case : List[Any] = """weight_g""" elif "weight_v" in name: snake_case : Optional[Any] = """weight_v""" elif "weight" in name: snake_case : str = """weight""" elif "bias" in name: snake_case : List[str] = """bias""" else: snake_case : str = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict ): snake_case : Optional[int] = full_name.split("conv_layers." )[-1] snake_case : List[str] = name.split("." ) snake_case : List[str] = int(items[0] ) snake_case : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case : Dict = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : int=True ): if config_path is not None: snake_case : Optional[Any] = HubertConfig.from_pretrained(_lowerCAmelCase ) else: snake_case : str = HubertConfig() if is_finetuned: if dict_path: snake_case : Optional[int] = Dictionary.load(_lowerCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case : Optional[int] = target_dict.pad_index snake_case : int = target_dict.bos_index snake_case : Tuple = target_dict.eos_index snake_case : List[Any] = len(target_dict.symbols ) snake_case : int = os.path.join(_lowerCAmelCase , "vocab.json" ) if not os.path.isdir(_lowerCAmelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCAmelCase ) ) return os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , _lowerCAmelCase ) snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCAmelCase , ) snake_case : List[str] = True if config.feat_extract_norm == """layer""" else False snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) snake_case : List[Any] = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) snake_case : Optional[Any] = HubertForCTC(_lowerCAmelCase ) else: snake_case : List[str] = HubertModel(_lowerCAmelCase ) if is_finetuned: snake_case : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) snake_case : List[Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned 
model or not""" ) __lowerCamelCase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
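Once the entry point above has written the dump folder, the fine-tuned (CTC) artifacts load back with the usual from_pretrained calls; a hedged sketch with an illustrative path:

from transformers import HubertForCTC, Wav2Vec2Processor

dump_dir = "./hubert-converted"  # illustrative; whatever was passed as --pytorch_dump_folder_path

# The script saved both the processor (tokenizer + feature extractor) and the model weights.
processor = Wav2Vec2Processor.from_pretrained(dump_dir)
model = HubertForCTC.from_pretrained(dump_dir)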
370
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } __lowerCamelCase = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): for attribute in key.split("." ): snake_case : Tuple = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: snake_case : int = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: snake_case : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case : Dict = value elif weight_type == "weight_g": snake_case : Optional[int] = value elif weight_type == "weight_v": snake_case : Optional[int] = value elif weight_type == "bias": snake_case : Tuple = value else: snake_case : Optional[int] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] ): snake_case : int = [] snake_case : List[Any] = fairseq_model.state_dict() snake_case : int = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): snake_case : List[str] = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) snake_case : str = True else: for key, mapped_key in MAPPING.items(): snake_case : Tuple = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." 
)[:-1] ) != key): # special case since naming is very similar continue snake_case : Tuple = True if "*" in mapped_key: snake_case : Union[str, Any] = name.split(__lowerCamelCase )[0].split("." )[-2] snake_case : Any = mapped_key.replace("*" , __lowerCamelCase ) if "weight_g" in name: snake_case : Optional[int] = "weight_g" elif "weight_v" in name: snake_case : Tuple = "weight_v" elif "bias" in name: snake_case : Dict = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case : str = "weight" else: snake_case : str = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any ): snake_case : str = full_name.split("conv_layers." )[-1] snake_case : int = name.split("." ) snake_case : Optional[int] = int(items[0] ) snake_case : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case : Dict = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case : Optional[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=True ): if config_path is not None: snake_case : str = UniSpeechSatConfig.from_pretrained(__lowerCamelCase ) else: snake_case : str = UniSpeechSatConfig() snake_case : Tuple = "" if is_finetuned: snake_case : Tuple = UniSpeechSatForCTC(__lowerCamelCase ) else: snake_case : List[Any] = UniSpeechSatForPreTraining(__lowerCamelCase ) snake_case , snake_case , snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) snake_case : Dict = model[0].eval() 
recursively_load_weights(__lowerCamelCase , __lowerCamelCase ) hf_wavavec.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __lowerCamelCase = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
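Note that, unlike the Hubert script, this one builds the fairseq arg_overrides from dict_path unconditionally, so --dict_path must be supplied even for the pretraining branch. Loading the converted pretraining checkpoint back is symmetric; the path is illustrative:

from transformers import UniSpeechSatForPreTraining

model = UniSpeechSatForPreTraining.from_pretrained("./unispeech-sat-converted")  # illustrative path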
10
0
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' @property def snake_case_( self ) -> Any: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case_( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = ort.SessionOptions() _SCREAMING_SNAKE_CASE = False return options def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) _SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default _SCREAMING_SNAKE_CASE = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=A ) _SCREAMING_SNAKE_CASE = """A red cat sitting on a park bench""" _SCREAMING_SNAKE_CASE = np.random.RandomState(0 ) _SCREAMING_SNAKE_CASE = pipe( prompt=A , image=A , mask_image=A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=A , output_type="""np""" , ) _SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
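The same legacy pipeline can be exercised outside the GPU test harness; a sketch on the CPU provider, reusing the checkpoint, images and call signature from the test above:

import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils import load_image

pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="onnx",
    safety_checker=None,
    feature_extractor=None,
    provider="CPUExecutionProvider",  # CPU instead of the CUDA provider used in the test
)
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
output = pipe(
    prompt="A red cat sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=15,
    generator=np.random.RandomState(0),
    output_type="np",
)
print(output.images[0].shape)  # (512, 512, 3) per the test's assertion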
58
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType lowercase_ = None lowercase_ = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image lowercase_ = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class a_ : '''simple docstring''' UpperCamelCase = True UpperCamelCase = None # Automatically constructed UpperCamelCase = "PIL.Image.Image" UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) UpperCamelCase = field(default='''Image''' , init=snake_case_ , repr=snake_case_ ) def __call__( self ) -> Tuple: return self.pa_type def snake_case_( self , A ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(A , A ): _SCREAMING_SNAKE_CASE = np.array(A ) if isinstance(A , A ): return {"path": value, "bytes": None} elif isinstance(A , A ): return {"path": None, "bytes": value} elif isinstance(A , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(A ) elif isinstance(A , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(A ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' ) def snake_case_( self , A , A=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' 
) else: if is_local_path(A ): _SCREAMING_SNAKE_CASE = PIL.Image.open(A ) else: _SCREAMING_SNAKE_CASE = path.split("""::""" )[-1] try: _SCREAMING_SNAKE_CASE = string_to_dict(A , config.HUB_DATASETS_URL )["""repo_id"""] _SCREAMING_SNAKE_CASE = token_per_repo_id.get(A ) except ValueError: _SCREAMING_SNAKE_CASE = None with xopen(A , """rb""" , use_auth_token=A ) as f: _SCREAMING_SNAKE_CASE = BytesIO(f.read() ) _SCREAMING_SNAKE_CASE = PIL.Image.open(bytes_ ) else: _SCREAMING_SNAKE_CASE = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def snake_case_( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def snake_case_( self , A ) -> pa.StructArray: if pa.types.is_string(storage.type ): _SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.binary() ) _SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.string() ) _SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: _SCREAMING_SNAKE_CASE = storage.field("""bytes""" ) else: _SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: _SCREAMING_SNAKE_CASE = storage.field("""path""" ) else: _SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.string() ) _SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _SCREAMING_SNAKE_CASE = pa.array( [encode_np_array(np.array(A ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) _SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.string() ) _SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(A , self.pa_type ) def snake_case_( self , A ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A ): with xopen(A , """rb""" ) as f: _SCREAMING_SNAKE_CASE = f.read() return bytes_ _SCREAMING_SNAKE_CASE = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _SCREAMING_SNAKE_CASE = pa.array( [os.path.basename(A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) _SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(A , self.pa_type ) def lowerCamelCase ( ) ->List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _SCREAMING_SNAKE_CASE = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase ( __lowerCamelCase : "PIL.Image.Image" ) ->bytes: _SCREAMING_SNAKE_CASE = BytesIO() if image.format in list_image_compression_formats(): _SCREAMING_SNAKE_CASE = 
image.format else: _SCREAMING_SNAKE_CASE = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(__lowerCamelCase , format=__lowerCamelCase ) return buffer.getvalue() def lowerCamelCase ( __lowerCamelCase : "PIL.Image.Image" ) ->dict: if hasattr(__lowerCamelCase , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )} def lowerCamelCase ( __lowerCamelCase : np.ndarray ) ->dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) _SCREAMING_SNAKE_CASE = array.dtype _SCREAMING_SNAKE_CASE = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER _SCREAMING_SNAKE_CASE = dtype.kind _SCREAMING_SNAKE_CASE = dtype.itemsize _SCREAMING_SNAKE_CASE = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _SCREAMING_SNAKE_CASE = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' ) if dtype is not dest_dtype: warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _SCREAMING_SNAKE_CASE = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _SCREAMING_SNAKE_CASE = dtype_byteorder + dtype_kind + str(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = np.dtype(__lowerCamelCase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' ) _SCREAMING_SNAKE_CASE = PIL.Image.fromarray(array.astype(__lowerCamelCase ) ) return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )} def lowerCamelCase ( __lowerCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) ->List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = first_non_null_value(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(__lowerCamelCase , np.ndarray ): _SCREAMING_SNAKE_CASE = no_op_if_value_is_null(__lowerCamelCase ) return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs] elif isinstance(__lowerCamelCase , PIL.Image.Image ): _SCREAMING_SNAKE_CASE = no_op_if_value_is_null(__lowerCamelCase ) return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs] else: return objs else: return objs
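The encode/decode methods above are what back the public datasets Image feature; a minimal round trip with an illustrative local file:

from datasets import Dataset, Image

ds = Dataset.from_dict({"image": ["path/to/cat.png"]})  # illustrative path
ds = ds.cast_column("image", Image())  # column now stores the {"bytes", "path"} struct defined above

pil_image = ds[0]["image"]  # decoding returns a PIL.Image.Image
print(pil_image.size)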
58
1
from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ :Optional[Any] = logging.get_logger(__name__) lowerCAmelCase__ :Dict = { '''Visual-Attention-Network/van-base''': ( '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json''' ), } class __a ( UpperCAmelCase ): _a : Optional[int] = 'van' def __init__( self , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[64, 128, 320, 512] , _SCREAMING_SNAKE_CASE=[3, 3, 12, 3] , _SCREAMING_SNAKE_CASE=[8, 8, 4, 4] , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=1e-2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , **_SCREAMING_SNAKE_CASE , ) -> Dict: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = mlp_ratios _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = dropout_rate
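Instantiating the config with a non-default stage layout; the public name VanConfig is assumed from the "van" model_type above:

from transformers import VanConfig

# Per-stage lists (patch_sizes, strides, hidden_sizes, depths, mlp_ratios) line up index by index.
config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
print(config.hidden_sizes[-1], config.num_channels)  # 512 3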
364
from __future__ import annotations from PIL import Image # Define glider example lowerCAmelCase__ :str = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example lowerCAmelCase__ :Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowerCAmelCase__ ( a__: list[list[int]] ) -> list[list[int]]: '''simple docstring''' _UpperCAmelCase = [] for i in range(len(a__ ) ): _UpperCAmelCase = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours _UpperCAmelCase = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(a__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(a__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(a__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. _UpperCAmelCase = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(a__ ) return next_generation def lowerCAmelCase__ ( a__: list[list[int]] , a__: int ) -> list[Image.Image]: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): # Create output image _UpperCAmelCase = Image.new('RGB' , (len(cells[0] ), len(a__ )) ) _UpperCAmelCase = img.load() # Save cells to image for x in range(len(a__ ) ): for y in range(len(cells[0] ) ): _UpperCAmelCase = 2_5_5 - cells[y][x] * 2_5_5 _UpperCAmelCase = (colour, colour, colour) # Save image images.append(a__ ) _UpperCAmelCase = new_generation(a__ ) return images if __name__ == "__main__": lowerCAmelCase__ :Tuple = generate_images(GLIDER, 1_6) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
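A blinker oscillates with period two under the rules encoded above; a quick check, calling the generation helper under its conceptual name new_generation (the name the generate_images loop references):

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]

# One step turns the vertical bar into a horizontal bar...
step1 = new_generation(blinker)
assert step1 == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]

# ...and a second step restores the original pattern.
assert new_generation(step1) == blinker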
185
0
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) snake_case_ : str = logging.getLogger(__name__) def A__ ( ): _UpperCamelCase : List[Any] = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=UpperCAmelCase_ , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=UpperCAmelCase_ , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=UpperCAmelCase_ , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=UpperCAmelCase_ , default='data/dump' , help='The dump file prefix.' ) _UpperCamelCase : Any = parser.parse_args() logger.info(f'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": _UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name ) _UpperCamelCase : Optional[int] = tokenizer.special_tokens_map['cls_token'] # `[CLS]` _UpperCamelCase : Dict = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": _UpperCamelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) _UpperCamelCase : Any = tokenizer.special_tokens_map['cls_token'] # `<s>` _UpperCamelCase : int = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": _UpperCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) _UpperCamelCase : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` _UpperCamelCase : Any = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(f'Loading text from {args.file_path}' ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: _UpperCamelCase : List[Any] = fp.readlines() logger.info('Start encoding' ) logger.info(f'{len(UpperCAmelCase_ )} examples to process.' ) _UpperCamelCase : int = [] _UpperCamelCase : Any = 0 _UpperCamelCase : Any = 1_0_0_0_0 _UpperCamelCase : Optional[Any] = time.time() for text in data: _UpperCamelCase : List[Any] = f'{bos} {text.strip()} {sep}' _UpperCamelCase : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) rslt.append(UpperCAmelCase_ ) iter += 1 if iter % interval == 0: _UpperCamelCase : Union[str, Any] = time.time() logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) _UpperCamelCase : Tuple = time.time() logger.info('Finished binarization' ) logger.info(f'{len(UpperCAmelCase_ )} examples processed.' ) _UpperCamelCase : Optional[int] = f'{args.dump_file}.{args.tokenizer_name}.pickle' _UpperCamelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 1_6): _UpperCamelCase : List[Any] = [np.uintaa(UpperCAmelCase_ ) for d in rslt] else: _UpperCamelCase : Any = [np.intaa(UpperCAmelCase_ ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'Dump to {dp_file}' ) with open(UpperCAmelCase_ , 'wb' ) as handle: pickle.dump(rslt_ , UpperCAmelCase_ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
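Reading a dump back mirrors the write path at the end of the script; the file name follows the {dump_file}.{tokenizer_name}.pickle pattern, so with the default arguments:

import pickle

# The script stored a shuffled list of per-line token-id arrays
# (np.uint16 or np.int32, depending on the tokenizer's vocab size).
with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
    sequences = pickle.load(handle)
print(len(sequences), sequences[0][:5])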
83
'''simple docstring''' import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class lowercase__ ( lowercase ): @require_torch def UpperCamelCase_ ( self : Dict ): '''simple docstring''' # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _UpperCamelCase : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' _UpperCamelCase : Dict = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' _UpperCamelCase : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache _UpperCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(lowerCamelCase__ ) BertModel.from_pretrained(lowerCamelCase__ ) BertTokenizer.from_pretrained(lowerCamelCase__ ) pipeline(task='fill-mask' ,model=lowerCamelCase__ ) # baseline - just load from_pretrained with normal network _UpperCamelCase : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed _UpperCamelCase : Dict = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _UpperCamelCase : str = '1' _UpperCamelCase : Union[str, Any] = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) @require_torch def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' # python one-liner segments # this must be loaded before socket.socket is monkey-patched _UpperCamelCase : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' _UpperCamelCase : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' _UpperCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache _UpperCamelCase : List[Any] = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(lowerCamelCase__ ) BertModel.from_pretrained(lowerCamelCase__ ) BertTokenizer.from_pretrained(lowerCamelCase__ ) pipeline(task='fill-mask' ,model=lowerCamelCase__ ) # baseline - just load from_pretrained with normal network _UpperCamelCase : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed _UpperCamelCase : List[Any] = self.get_env() _UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) @require_torch def UpperCamelCase_ ( self : Union[str, Any] ): '''simple 
docstring''' # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _UpperCamelCase : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n ' _UpperCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n ' _UpperCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n ' # baseline - just load from_pretrained with normal network _UpperCamelCase : Optional[int] = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed _UpperCamelCase : Optional[Any] = self.get_env() _UpperCamelCase : int = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) # next emulate no network _UpperCamelCase : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _UpperCamelCase : Dict = '1' _UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) @require_torch def UpperCamelCase_ ( self : str ): '''simple docstring''' _UpperCamelCase : int = '\nfrom transformers import pipeline\n ' _UpperCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n ' _UpperCamelCase : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n ' _UpperCamelCase : Union[str, Any] = self.get_env() _UpperCamelCase : List[Any] = '1' _UpperCamelCase : Tuple = [sys.executable, '-c', '\n'.join([load, mock, run] )] _UpperCamelCase : int = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ ) self.assertEqual(result.returncode ,1 ,result.stderr ) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,) @require_torch def UpperCamelCase_ ( self : Dict ): '''simple docstring''' _UpperCamelCase : Optional[int] = '\nfrom transformers import AutoModel\n ' _UpperCamelCase : int = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n ' # baseline - just load from_pretrained with normal network _UpperCamelCase : Any = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed _UpperCamelCase : Optional[Any] = self.get_env() _UpperCamelCase : Optional[int] = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' 
,result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _UpperCamelCase : List[Any] = '1' _UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() )
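Outside the subprocess harness, the same behaviour is driven by a single environment variable, which must be set before transformers is imported -- exactly why the tests above launch a fresh interpreter:

import os

os.environ["TRANSFORMERS_OFFLINE"] = "1"  # must precede the transformers import

from transformers import BertConfig

# Resolves from the local cache only; raises if the files were never downloaded.
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")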
83
1
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch

from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so the training test below does not include download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1


class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]=2 , lowerCamelCase : int=True , lowerCamelCase : str=False , lowerCamelCase : List[str]=10 , lowerCamelCase : Dict=3 , lowerCamelCase : str=32 * 4 , lowerCamelCase : Tuple=32 * 6 , lowerCamelCase : int=4 , lowerCamelCase : Optional[int]=32 , ) -> List[Any]: lowerCAmelCase_ : Tuple = parent lowerCAmelCase_ : int = batch_size lowerCAmelCase_ : Tuple = is_training lowerCAmelCase_ : str = use_auxiliary_loss lowerCAmelCase_ : Optional[Any] = num_queries lowerCAmelCase_ : List[str] = num_channels lowerCAmelCase_ : Optional[Any] = min_size lowerCAmelCase_ : Dict = max_size lowerCAmelCase_ : List[Any] = num_labels lowerCAmelCase_ : List[Any] = mask_feature_size def __lowercase ( self : str ) -> List[Any]: lowerCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase ) lowerCAmelCase_ : int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase ) lowerCAmelCase_ : List[Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase ) > 0.5 ).float() lowerCAmelCase_ : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase ) > 0.5).long() lowerCAmelCase_ : List[Any] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __lowercase ( self : Optional[int] ) -> Optional[int]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __lowercase ( self : Tuple ) -> Optional[Any]: lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def __lowercase ( self : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ) -> Union[str, Any]: lowerCAmelCase_ : Any = output.encoder_hidden_states lowerCAmelCase_ : Dict = output.pixel_decoder_hidden_states lowerCAmelCase_ : str = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase ) 
, config.decoder_config.decoder_layers ) def __lowercase ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple=False ) -> List[Any]: with torch.no_grad(): lowerCAmelCase_ : int = MaskFormerModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCAmelCase_ : List[Any] = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase ) lowerCAmelCase_ : Union[str, Any] = model(lowerCamelCase , output_hidden_states=lowerCamelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase , lowerCamelCase ) def __lowercase ( self : Any , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ) -> Dict: lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() def comm_check_on_output(lowerCamelCase : Optional[Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase_ : Dict = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase ) lowerCAmelCase_ : List[str] = model(lowerCamelCase ) comm_check_on_output(lowerCamelCase ) lowerCAmelCase_ : Union[str, Any] = model( pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase ) comm_check_on_output(lowerCamelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase): """simple docstring""" lowercase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowercase = ( {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def __lowercase ( self : Dict ) -> Optional[Any]: lowerCAmelCase_ : Tuple = MaskFormerModelTester(self ) lowerCAmelCase_ : str = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase ) def __lowercase ( self : List[Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def __lowercase ( self : Tuple ) -> List[Any]: lowerCAmelCase_, lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase ) def __lowercase ( self : Union[str, Any] ) -> str: lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def __lowercase ( self : Any ) -> Optional[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def __lowercase ( self : Union[str, Any] ) -> Optional[Any]: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def __lowercase ( self : Any ) -> str: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def __lowercase ( self : List[Any] ) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def __lowercase ( self : List[str] ) -> Optional[int]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowercase ( self : str ) -> Optional[int]: pass def __lowercase ( self : Tuple ) -> List[str]: lowerCAmelCase_, lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : Optional[Any] = model_class(lowerCamelCase ) lowerCAmelCase_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : List[str] = [*signature.parameters.keys()] lowerCAmelCase_ : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase ) @slow def __lowercase ( self : Optional[int] ) -> List[str]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase_ : List[str] = MaskFormerModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def __lowercase ( self : Dict ) -> int: lowerCAmelCase_ : Any = (self.model_tester.min_size,) * 2 lowerCAmelCase_ : Dict = { """pixel_values""": torch.randn((2, 3, *size) , device=lowerCamelCase ), """mask_labels""": torch.randn((2, 10, *size) , device=lowerCamelCase ), """class_labels""": torch.zeros(2 , 10 , device=lowerCamelCase ).long(), } lowerCAmelCase_ : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase ) lowerCAmelCase_ : str = model(**lowerCamelCase ) self.assertTrue(outputs.loss is not None ) def __lowercase ( self : int ) -> int: lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase ) def __lowercase ( self : Dict ) -> List[str]: lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(lowerCamelCase ).to(lowerCamelCase ) lowerCAmelCase_ : Tuple = model(**lowerCamelCase , output_attentions=lowerCamelCase ) self.assertTrue(outputs.attentions is not None ) def __lowercase ( self : int ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : Optional[int] = self.all_model_classes[1] lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : str = 
self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.train() lowerCAmelCase_ : str = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase ).loss loss.backward() def __lowercase ( self : Any ) -> Union[str, Any]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : List[str] = self.all_model_classes[1] lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : List[str] = True lowerCAmelCase_ : List[str] = True lowerCAmelCase_ : Dict = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.train() lowerCAmelCase_ : Union[str, Any] = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase ) lowerCAmelCase_ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase_ : List[str] = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase_ : Optional[int] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase_ : Tuple = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A : List[Any] = 1E-4 def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class __snake_case ( unittest.TestCase): """simple docstring""" @cached_property def __lowercase ( self : Union[str, Any] ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def __lowercase ( self : List[Any] ) -> Union[str, Any]: lowerCAmelCase_ : Optional[int] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(lowerCamelCase ) lowerCAmelCase_ : Dict = self.default_image_processor lowerCAmelCase_ : Optional[int] = prepare_img() lowerCAmelCase_ : Dict = image_processor(lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase ) lowerCAmelCase_ : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**lowerCamelCase ) lowerCAmelCase_ : Any = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(lowerCamelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) ) lowerCAmelCase_ : Union[str, Any] = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(lowerCamelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) ) lowerCAmelCase_ : Optional[Any] = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(lowerCamelCase ) 
self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) ) def __lowercase ( self : Optional[int] ) -> Optional[int]: lowerCAmelCase_ : Union[str, Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowerCamelCase ) .eval() ) lowerCAmelCase_ : Union[str, Any] = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : Optional[int] = image_processor(lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase ) lowerCAmelCase_ : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**lowerCamelCase ) # masks_queries_logits lowerCAmelCase_ : Dict = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Any = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] lowerCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase ).to(lowerCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) ) # class_queries_logits lowerCAmelCase_ : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : Tuple = torch.tensor( [ [1.6_512E00, -5.2_572E00, -3.3_519E00], [3.6_169E-02, -5.9_025E00, -2.9_313E00], [1.0_766E-04, -7.7_630E00, -5.1_263E00], ] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) ) def __lowercase ( self : Optional[int] ) -> List[str]: lowerCAmelCase_ : List[str] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(lowerCamelCase ) .eval() ) lowerCAmelCase_ : Tuple = self.default_image_processor lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Any = image_processor(lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase ) lowerCAmelCase_ : List[str] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): lowerCAmelCase_ : Optional[Any] = model(**lowerCamelCase ) # masks_queries_logits lowerCAmelCase_ : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Optional[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] lowerCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase ).to(lowerCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) ) # class_queries_logits lowerCAmelCase_ : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : int = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], 
[6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) ) def __lowercase ( self : Union[str, Any] ) -> List[str]: lowerCAmelCase_ : Dict = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowerCamelCase ) .eval() ) lowerCAmelCase_ : List[str] = self.default_image_processor lowerCAmelCase_ : int = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , ) lowerCAmelCase_ : List[str] = inputs["""pixel_values"""].to(lowerCamelCase ) lowerCAmelCase_ : Tuple = [el.to(lowerCamelCase ) for el in inputs["""mask_labels"""]] lowerCAmelCase_ : Union[str, Any] = [el.to(lowerCamelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCAmelCase_ : Any = model(**lowerCamelCase ) self.assertTrue(outputs.loss is not None )
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig a = logging.get_logger(__name__) a = { 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class SCREAMING_SNAKE_CASE__ ( _a ): _a = 'dpt' def __init__( self : int , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Any=12 , lowerCAmelCase : str=3072 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : str=0.02 , lowerCAmelCase : str=1e-12 , lowerCAmelCase : Optional[Any]=384 , lowerCAmelCase : str=16 , lowerCAmelCase : int=3 , lowerCAmelCase : Tuple=False , lowerCAmelCase : Any=True , lowerCAmelCase : Tuple=[2, 5, 8, 11] , lowerCAmelCase : Tuple="project" , lowerCAmelCase : Optional[int]=[4, 2, 1, 0.5] , lowerCAmelCase : Any=[96, 192, 384, 768] , lowerCAmelCase : int=256 , lowerCAmelCase : List[Any]=-1 , lowerCAmelCase : Any=False , lowerCAmelCase : int=True , lowerCAmelCase : List[str]=0.4 , lowerCAmelCase : Dict=255 , lowerCAmelCase : int=0.1 , lowerCAmelCase : List[Any]=[1, 1024, 24, 24] , lowerCAmelCase : str=[0, 1] , lowerCAmelCase : str=None , **lowerCAmelCase : Optional[Any] , ): super().__init__(**lowerCAmelCase ) lowerCAmelCase = hidden_size lowerCAmelCase = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("""Initializing the config with a `BiT` backbone.""" ) lowerCAmelCase = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, } lowerCAmelCase = BitConfig(**lowerCAmelCase ) elif isinstance(lowerCAmelCase , lowerCAmelCase ): logger.info("""Initializing the config with a `BiT` backbone.""" ) lowerCAmelCase = BitConfig(**lowerCAmelCase ) elif isinstance(lowerCAmelCase , lowerCAmelCase ): lowerCAmelCase = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) lowerCAmelCase = backbone_featmap_shape lowerCAmelCase = neck_ignore_stages if readout_type != "project": raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" ) else: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = [] lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = qkv_bias lowerCAmelCase = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" ) lowerCAmelCase = readout_type lowerCAmelCase = reassemble_factors lowerCAmelCase = neck_hidden_sizes lowerCAmelCase = fusion_hidden_size lowerCAmelCase = head_in_index lowerCAmelCase = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) lowerCAmelCase = use_auxiliary_head lowerCAmelCase = auxiliary_loss_weight lowerCAmelCase = semantic_loss_ignore_index lowerCAmelCase = semantic_classifier_dropout def __lowercase ( self : Any ): lowerCAmelCase = 
copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCAmelCase = self.backbone_config.to_dict() lowerCAmelCase = self.__class__.model_type return output
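# Minimal usage sketch for the config above (hedged: the assertions only
# restate the to_dict() behaviour shown in this file):
#
#   config = DPTConfig(is_hybrid=True)   # auto-creates a BiT backbone config
#   serialized = config.to_dict()
#   assert serialized["model_type"] == "dpt"
#   assert isinstance(serialized["backbone_config"], dict)  # nested config serialized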
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=_a ) class SCREAMING_SNAKE_CASE__ ( _a ): _a = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) _a = Features({'audio': Audio()} ) _a = Features({'labels': ClassLabel} ) _a = "audio" _a = "labels" def __lowercase ( self : List[str] , lowerCAmelCase : Optional[int] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowerCAmelCase ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowerCAmelCase = copy.deepcopy(self ) lowerCAmelCase = self.label_schema.copy() lowerCAmelCase = features[self.label_column] lowerCAmelCase = label_schema return task_template @property def __lowercase ( self : List[str] ): return { self.audio_column: "audio", self.label_column: "labels", }
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : int = '''new-model''' if is_tf_available(): class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[str] = NewModelConfig @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Optional[int] = "bert-base-cased" SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : List[str] = "bert-base-cased" SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : List[str] ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Optional[Any] ): for model_name in 
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : int ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Tuple ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : Optional[int] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _A ( self : List[str] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) @slow @require_tensorflow_probability def _A ( self : Optional[Any] ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained( UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_4410 ) 
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase_ ) , 1_4410 ) def _A ( self : int ): SCREAMING_SNAKE_CASE : int = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(model.num_parameters() , 1_4410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase_ ) , 1_4410 ) def _A ( self : int ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel SCREAMING_SNAKE_CASE : Tuple = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(model.config ) SCREAMING_SNAKE_CASE : Optional[Any] = ["FunnelBaseModel"] SCREAMING_SNAKE_CASE : str = TFAutoModel.from_config(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = TFAutoModel.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Optional[int] ): try: AutoConfig.register("new-model" , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(UpperCAmelCase_ ): auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ ) auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase_ ): auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE : Tuple = BertModelTester(self ).get_config() SCREAMING_SNAKE_CASE : Optional[int] = NewModelConfig(**tiny_config.to_dict() ) SCREAMING_SNAKE_CASE : Optional[Any] = auto_class.from_config(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = auto_class.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def _A ( self : Optional[int] ): with self.assertRaisesRegex( UpperCAmelCase_ , "bert-base is not a local folder and is not a valid model identifier" ): SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained("bert-base" ) def _A ( self : Dict ): with self.assertRaisesRegex( UpperCAmelCase_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModel.from_pretrained(UpperCAmelCase_ , revision="aaaaaa" ) def _A ( self : Tuple ): with self.assertRaisesRegex( UpperCAmelCase_ , "hf-internal-testing/config-no-model does not appear 
to have a file named pytorch_model.bin" , ): SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def _A ( self : Optional[int] ): with self.assertRaisesRegex(UpperCAmelCase_ , "Use `from_pt=True` to load this model" ): SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" ) def _A ( self : Tuple ): # Make sure we have cached the model. SCREAMING_SNAKE_CASE : int = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint SCREAMING_SNAKE_CASE : Any = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated."
    " Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
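# Example invocation (the script file name and output path are illustrative):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation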
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _lowercase : Optional[Any] = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } _lowercase : Any = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } _lowercase : Optional[Any] = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } _lowercase : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } _lowercase : Optional[int] = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } _lowercase : Tuple = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } _lowercase : List[Any] = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } _lowercase : Dict = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } _lowercase : List[str] = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class __magic_name__ ( _UpperCAmelCase ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __magic_name__ ( _UpperCAmelCase ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = 
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION _lowercase : Optional[int] = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) _lowercase : Union[str, Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) _lowercase : int = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(_UpperCAmelCase ) class __magic_name__ : def __call__( self : int , lowercase_ : int , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Union[bool, str] = False , lowercase_ : Union[bool, str] = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[bool] = None , **lowercase_ : int , ): if titles is None and texts is None: return super().__call__( lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , ) elif titles is None or texts is None: lowercase_ : Tuple = titles if texts is None else texts return super().__call__( lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , ) lowercase_ : List[Any] = titles if not isinstance(lowercase_ , lowercase_ ) else [titles] lowercase_ : Dict = texts if not isinstance(lowercase_ , lowercase_ ) else [texts] lowercase_ : List[Any] = len(lowercase_ ) lowercase_ : Optional[Any] = questions if not isinstance(lowercase_ , lowercase_ ) else [questions] * n_passages if len(lowercase_ ) != len(lowercase_ ): raise ValueError( f'''There should be as many titles than texts but got {len(lowercase_ )} titles and {len(lowercase_ )} texts.''' ) lowercase_ : Any = super().__call__(lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""] lowercase_ : str = super().__call__(lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""] lowercase_ : List[Any] = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowercase_ , lowercase_ ) ] } if return_attention_mask is not False: lowercase_ : int = [] 
for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowercase_ : int = attention_mask return self.pad(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : BatchEncoding , lowercase_ : DPRReaderOutput , lowercase_ : int = 16 , lowercase_ : int = 64 , lowercase_ : int = 4 , ): lowercase_ : Optional[int] = reader_input["""input_ids"""] lowercase_ : Optional[int] = reader_output[:3] lowercase_ : Union[str, Any] = len(lowercase_ ) lowercase_ : List[Any] = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ ) lowercase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: lowercase_ : Optional[Any] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowercase_ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowercase_ : Any = sequence_ids.index(self.pad_token_id ) else: lowercase_ : Dict = len(lowercase_ ) lowercase_ : Any = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowercase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[int] , lowercase_ : List[int] , lowercase_ : int , lowercase_ : int , ): lowercase_ : Tuple = [] for start_index, start_score in enumerate(lowercase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowercase_ : str = sorted(lowercase_ , key=lambda lowercase_ : x[1] , reverse=lowercase_ ) lowercase_ : Dict = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) lowercase_ : Optional[int] = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowercase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_UpperCAmelCase ) class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
362
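The span-selection logic in the reader tokenizer above (score every start/end pair, sort by score, keep non-overlapping spans) can be illustrated with a small self-contained sketch; the function name and inputs here are illustrative, not the tokenizer's API.

def best_spans(start_logits, end_logits, max_answer_length=4, top_spans=2):
    # Score every span up to max_answer_length, as _get_best_spans does above.
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # Keep a span only if it does not overlap an already-chosen span.
        if any(not (end < c_start or c_end < start) for c_start, c_end in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

assert best_spans([0.1, 2.0, 0.3], [0.2, 1.5, 0.1]) == [(1, 1), (2, 2)]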
'''simple docstring''' def lowerCamelCase ( ) -> Dict: lowercase_ : Union[str, Any] = [] lowercase_ : Tuple = 1 while len(UpperCAmelCase__ ) < 1e6: constant.append(str(UpperCAmelCase__ ) ) i += 1 lowercase_ : int = """""".join(UpperCAmelCase__ ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[99999] ) * int(constant[999999] ) ) if __name__ == "__main__": print(solution())
21
0
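The Champernowne sample above is hard to follow through the renaming; here is a de-obfuscated, runnable sketch of the same computation (Project Euler 40), with variable names of my choosing:

def champernowne_product(limit=10**6):
    digits = []
    i = 1
    while len(digits) < limit:  # over-generates characters, as in the original
        digits.append(str(i))
        i += 1
    constant = "".join(digits)
    product = 1
    for position in (1, 10, 100, 1_000, 10_000, 100_000, 1_000_000):
        product *= int(constant[position - 1])
    return product

print(champernowne_product())  # expected 210 for Project Euler 40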
'''simple docstring''' from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder _A : List[str] =datasets.utils.logging.get_logger(__name__) class _lowercase ( folder_based_builder.FolderBasedBuilderConfig ): a = None a = None class _lowercase ( folder_based_builder.FolderBasedBuilder ): a = datasets.Audio() a = """audio""" a = AudioFolderConfig a = 42 # definition at the bottom of the script a = AudioClassification(audio_column="""audio""" , label_column="""label""" ) _A : Any =[ '''.aiff''', '''.au''', '''.avr''', '''.caf''', '''.flac''', '''.htk''', '''.svx''', '''.mat4''', '''.mat5''', '''.mpc2k''', '''.ogg''', '''.paf''', '''.pvf''', '''.raw''', '''.rf64''', '''.sd2''', '''.sds''', '''.ircam''', '''.voc''', '''.w64''', '''.wav''', '''.nist''', '''.wavex''', '''.wve''', '''.xi''', '''.mp3''', '''.opus''', ] _A : Optional[int] =AUDIO_EXTENSIONS
41
'''simple docstring''' from __future__ import annotations _A : Any ={ '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } class _lowercase : def __init__( self: Tuple , UpperCamelCase__: dict[str, list[str]] , UpperCamelCase__: str ): lowerCamelCase__ : str = graph # mapping node to its parent in resulting breadth first tree lowerCamelCase__ : dict[str, str | None] = {} lowerCamelCase__ : Any = source_vertex def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : List[str] = {self.source_vertex} lowerCamelCase__ : List[str] = None lowerCamelCase__ : Tuple = [self.source_vertex] # first in first out queue while queue: lowerCamelCase__ : Tuple = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(UpperCamelCase__ ) lowerCamelCase__ : List[str] = vertex queue.append(UpperCamelCase__ ) def lowerCamelCase_ ( self: str , UpperCamelCase__: str ): if target_vertex == self.source_vertex: return self.source_vertex lowerCamelCase__ : Tuple = self.parent.get(UpperCamelCase__ ) if target_vertex_parent is None: lowerCamelCase__ : int = ( F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(UpperCamelCase__ ) return self.shortest_path(UpperCamelCase__ ) + F'''->{target_vertex}''' if __name__ == "__main__": _A : int =Graph(graph, '''G''') g.breath_first_search() print(g.shortest_path('''D''')) print(g.shortest_path('''G''')) print(g.shortest_path('''Foo'''))
41
1
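One note on the breadth-first search above: queue.pop(0) on a list is O(n) per dequeue. A minimal sketch of the same parent-map construction using collections.deque (illustrative names, not the class API above):

from collections import deque

def bfs_parents(graph, source):
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    return parent

demo_graph = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
assert bfs_parents(demo_graph, "A") == {"A": None, "B": "A", "C": "A", "D": "B"}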
def __UpperCamelCase ( lowercase__ : int ) -> int: '''simple docstring''' lowerCAmelCase_ : Optional[Any] = abs(lowercase__ ) lowerCAmelCase_ : int = 0 while n > 0: res += n % 10 n //= 10 return res def __UpperCamelCase ( lowercase__ : int ) -> int: '''simple docstring''' lowerCAmelCase_ : List[str] = abs(lowercase__ ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def __UpperCamelCase ( lowercase__ : int ) -> int: '''simple docstring''' return sum(int(lowercase__ ) for c in str(abs(lowercase__ ) ) ) def __UpperCamelCase ( ) -> None: '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowercase__ : Callable , lowercase__ : int ) -> None: lowerCAmelCase_ : Union[str, Any] = f'{func.__name__}({value})' lowerCAmelCase_ : Any = timeit(f'__main__.{call}' , setup="""import __main__""" ) print(f'{call:56} = {func(lowercase__ )} -- {timing:.4f} seconds' ) for value in (262144, 1125899906842624, 1267650600228229401496703205376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(lowercase__ , lowercase__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
28
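For reference, the three digit-sum variants benchmarked above, rewritten with plain names and a shared sanity check (they are equivalent up to renaming):

def sum_of_digits(n):
    n = abs(n)
    total = 0
    while n > 0:
        total += n % 10
        n //= 10
    return total

def sum_of_digits_recursion(n):
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)

def sum_of_digits_compact(n):
    return sum(int(c) for c in str(abs(n)))

assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19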
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class __a ( unittest.TestCase ): def A ( self : List[Any] ): lowerCAmelCase_ : Dict = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) lowerCAmelCase_ : Optional[Any] = Vector() def A ( self : List[str] ): lowerCAmelCase_ : Tuple = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(UpperCAmelCase ) , """(0,0,0,0,0,1)""" ) def A ( self : Any ): lowerCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4] ) self.assertEqual(len(UpperCAmelCase ) , 4 ) def A ( self : Dict ): lowerCAmelCase_ : Dict = Vector([1, 2] ) lowerCAmelCase_ : str = Vector([1, 2, 3, 4, 5] ) lowerCAmelCase_ : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) lowerCAmelCase_ : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def A ( self : Optional[Any] ): lowerCAmelCase_ : Optional[int] = Vector([1, 2, 3] ) lowerCAmelCase_ : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def A ( self : Optional[Any] ): lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] ) lowerCAmelCase_ : Dict = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def A ( self : Union[str, Any] ): lowerCAmelCase_ : Dict = Vector([1, 2, 3] ) lowerCAmelCase_ : Optional[int] = Vector([2, -1, 4] ) # for test of dot product lowerCAmelCase_ : str = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def A ( self : List[str] ): self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def A ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def A ( self : Optional[Any] ): lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] ) lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , UpperCAmelCase , UpperCAmelCase ) ) , """(3,4,7)""" ) def A ( self : Optional[int] ): lowerCAmelCase_ : List[Any] = Vector([1, 0, 0, 0, 0, 0] ) lowerCAmelCase_ : int = x.copy() self.assertEqual(str(UpperCAmelCase ) , str(UpperCAmelCase ) ) def A ( self : Union[str, Any] ): lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(UpperCAmelCase ) , """(0,1,0)""" ) def A ( self : Any ): lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) ) def A ( self : Optional[int] ): lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCAmelCase_ : List[str] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase , UpperCAmelCase ) ) def A ( self : Tuple ): lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCAmelCase_ : Union[str, Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase , UpperCAmelCase ) ) def A ( self : Optional[int] ): lowerCAmelCase_ : 
Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def A ( self : Optional[int] ): lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) lowerCAmelCase_ : Any = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def A ( self : Tuple ): lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) ) def A ( self : Optional[int] ): lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def A ( self : Dict ): lowerCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def A ( self : Union[str, Any] ): lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def A ( self : Optional[int] ): self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
28
1
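The minor/cofactor/determinant expectations in the tests above can be reproduced with a tiny standalone Laplace expansion, independent of the `lib` module under test:

def det(matrix):
    # Laplace expansion along the first row.
    if len(matrix) == 1:
        return matrix[0][0]
    total = 0
    for col, value in enumerate(matrix[0]):
        minor = [row[:col] + row[col + 1 :] for row in matrix[1:]]
        total += (-1) ** col * value * det(minor)
    return total

assert det([[1, 2, 3], [2, 4, 5], [6, 7, 8]]) == -5  # matches the test above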
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness SCREAMING_SNAKE_CASE :Dict = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' SCREAMING_SNAKE_CASE :List[Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' SCREAMING_SNAKE_CASE :Dict = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' SCREAMING_SNAKE_CASE :Union[str, Any] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network.
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' SCREAMING_SNAKE_CASE :List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : Optional[Any] ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Value("string" ), } ) ,homepage="https://github.com/openai/human-eval" ,codebase_urls=["https://github.com/openai/human-eval"] ,reference_urls=["https://github.com/openai/human-eval"] ,license=_LICENSE ,) def UpperCamelCase_ ( self : str ,A : int ,A : Union[str, Any] ,A : Optional[int]=[1, 10, 1_00] ,A : Union[str, Any]=4 ,A : Dict=3.0 ): if os.getenv("HF_ALLOW_CODE_EVAL" ,0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows."
) with ThreadPoolExecutor(max_workers=A ) as executor: __A = [] __A = Counter() __A = 0 __A = defaultdict(A ) for task_id, (candidates, test_case) in enumerate(zip(A ,A ) ): for candidate in candidates: __A = candidate + "\n" + test_case __A = (test_program, timeout, task_id, completion_id[task_id]) __A = executor.submit(A ,*A ) futures.append(A ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(A ): __A = future.result() results[result["task_id"]].append((result["completion_id"], result) ) __A , __A = [], [] for result in results.values(): result.sort() __A = [r[1]["passed"] for r in result] total.append(len(A ) ) correct.append(sum(A ) ) __A = np.array(A ) __A = np.array(A ) __A = k __A = {f'''pass@{k}''': estimate_pass_at_k(A ,A ,A ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def UpperCAmelCase ( a_ , a_ , a_ ) -> Union[str, Any]: """simple docstring""" def estimator(a_ , a_ , a_ ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(a_ , a_ ): __A = itertools.repeat(a_ , len(a_ ) ) else: assert len(a_ ) == len(a_ ) __A = iter(a_ ) return np.array([estimator(int(a_ ) , int(a_ ) , a_ ) for n, c in zip(a_ , a_ )] )
15
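The estimator at the bottom of the metric above is the unbiased pass@k from the HumanEval paper, pass@k = 1 - C(n-c, k)/C(n, k), computed stably as a running product. A small numeric check:

import numpy as np

def pass_at_k(n, c, k):
    # Unbiased estimator: 1 - C(n - c, k) / C(n, k), as a stable running product.
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# 2 correct out of 4 samples at k=2: 1 - C(2, 2) / C(4, 2) = 1 - 1/6
assert abs(pass_at_k(4, 2, 2) - (1 - 1 / 6)) < 1e-12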
def UpperCAmelCase ( a_ ) -> list: """simple docstring""" if len(a_ ) <= 1: return [tuple(a_ )] __A = [] def generate(a_ , a_ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , a_ ) for i in range(k - 1 ): if k % 2 == 0: # k is even __A , __A = arr[k - 1], arr[i] else: # k is odd __A , __A = arr[k - 1], arr[0] generate(k - 1 , a_ ) generate(len(a_ ) , a_ ) return res if __name__ == "__main__": SCREAMING_SNAKE_CASE :int = input('Enter numbers separated by a comma:\n').strip() SCREAMING_SNAKE_CASE :Dict = [int(item) for item in user_input.split(',')] print(heaps(arr))
15
1
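The permutation generator above is Heap's algorithm. A cleaned-up sketch with plain names, cross-checked against itertools.permutations (same set of tuples, different order):

from itertools import permutations

def heaps(arr):
    # Heap's algorithm, restructured from the sample above.
    res = []
    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k even: swap positions i and k-1
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k odd: swap positions 0 and k-1
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))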
'''simple docstring''' from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
104
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict ) -> Optional[int]: '''simple docstring''' if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _a = np.full((len(lowerCAmelCase__ ), sequence_length, 2) , lowerCAmelCase__ ) else: _a = np.full((len(lowerCAmelCase__ ), sequence_length) , lowerCAmelCase__ ) for i, tensor in enumerate(lowerCAmelCase__ ): if padding_side == "right": if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _a = tensor[:sequence_length] else: _a = tensor[:sequence_length] else: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _a = tensor[:sequence_length] else: _a = tensor[:sequence_length] return out_tensor.tolist() def _A (lowerCAmelCase__ :Any ) -> Union[str, Any]: '''simple docstring''' _a = ord(lowerCAmelCase__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True _a = unicodedata.category(lowerCAmelCase__ ) if cat.startswith('P' ): return True return False @dataclass class a ( _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = 42 _lowerCAmelCase = True _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = -1_0_0 _lowerCAmelCase = "pt" def __UpperCAmelCase ( self , __magic_name__ ) -> Any: import torch _a = 'label' if 'label' in features[0].keys() else 'labels' _a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None _a = self.tokenizer.pad( __magic_name__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , ) if labels is None: return batch _a = torch.tensor(batch['entity_ids'] ).shape[1] _a = self.tokenizer.padding_side if padding_side == "right": _a = [ list(__magic_name__ ) + [self.label_pad_token_id] * (sequence_length - len(__magic_name__ )) for label in labels ] else: _a = [ [self.label_pad_token_id] * (sequence_length - len(__magic_name__ )) + list(__magic_name__ ) for label in labels ] _a = [feature['ner_tags'] for feature in features] _a = padding_tensor(__magic_name__ , -1 , __magic_name__ , __magic_name__ ) _a = [feature['original_entity_spans'] for feature in features] _a = padding_tensor(__magic_name__ , (-1, -1) , __magic_name__ , __magic_name__ ) _a = {k: torch.tensor(__magic_name__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
104
1
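The four branches of the padding helper above assign the identical slice, which looks like damage from the renaming. A hedged reconstruction of the intended behavior (write each sequence into a pre-filled array on the chosen side; the exact slicing is my assumption, not verified against the original script):

import numpy as np

def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Pre-fill with the padding value, then copy each sequence left- or right-aligned.
    if isinstance(padding_value, tuple):
        out = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        length = min(len(tensor), sequence_length)
        if padding_side == "right":
            out[i, :length] = tensor[:length]
        else:
            out[i, sequence_length - length :] = tensor[:length]
    return out.tolist()

assert padding_tensor([[1, 2], [3]], -100, "right", 3) == [[1, 2, -100], [3, -100, -100]]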
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase ( SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=100 , SCREAMING_SNAKE_CASE=1_026 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , ): '''simple docstring''' set_seed(3 ) # generate train_data and objective_set __UpperCamelCase , __UpperCamelCase :Optional[Any] = generate_datasets( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , number=SCREAMING_SNAKE_CASE , min_len=1_026 , trim=SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __UpperCamelCase :List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # load pretrained model __UpperCamelCase :str = load_gpta('''gpt2''' ).to(SCREAMING_SNAKE_CASE ) print('''computing perplexity on objective set''' ) __UpperCamelCase :List[str] = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).item() print('''perplexity on objective set:''' , SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=15 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=100 , SCREAMING_SNAKE_CASE="igf_model.pt" , ): '''simple docstring''' set_seed(42 ) # Load pre-trained model __UpperCamelCase :str = GPTaLMHeadModel.from_pretrained('''gpt2''' ) # Initialize secondary learner to use embedding weights of model __UpperCamelCase :List[str] = SecondaryLearner(SCREAMING_SNAKE_CASE ) # Train secondary learner __UpperCamelCase :Tuple = train_secondary_learner( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_epochs=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=1_000 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=recopy_gpta , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , ): '''simple docstring''' __UpperCamelCase :List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) __UpperCamelCase :Tuple = RandomSampler(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[Any] = max_steps // (len(SCREAMING_SNAKE_CASE )) + 1 __UpperCamelCase :Optional[int] = 0 __UpperCamelCase :int = torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = 
recopy_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(SCREAMING_SNAKE_CASE ) secondary_learner.eval() __UpperCamelCase :List[str] = [] __UpperCamelCase :str = 0 __UpperCamelCase :int = [] __UpperCamelCase :int = [] # Compute the performance of the transformer model at the beginning __UpperCamelCase :List[str] = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) test_perps.append(SCREAMING_SNAKE_CASE ) print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE ) for epoch in range(int(SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() __UpperCamelCase :Optional[Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) __UpperCamelCase :Tuple = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) __UpperCamelCase :Any = True if secondary_learner is not None: __UpperCamelCase :List[Any] = secondary_learner.forward( torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: __UpperCamelCase :List[Any] = -1 if predicted_q < threshold: __UpperCamelCase :List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __UpperCamelCase :int = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __UpperCamelCase :Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __UpperCamelCase :Tuple = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) test_perps.append(SCREAMING_SNAKE_CASE ) print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :List[str] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' ) # Required parameters parser.add_argument( '''--data_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The input data dir. 
Should contain data files for WikiText.''' , ) parser.add_argument( '''--model_name_or_path''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=( '''A jbl file containing tokenized data which can be split as objective dataset, ''' '''train_dataset and test_dataset.''' ) , ) parser.add_argument( '''--igf_data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , ) parser.add_argument( '''--output_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The output directory where the final fine-tuned model is stored.''' , ) parser.add_argument( '''--tokenizer_name''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument('''--seed''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A seed for reproducible training.''' ) parser.add_argument( '''--context_len''' , default=32 , type=SCREAMING_SNAKE_CASE , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--size_objective_set''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''number of articles that are long enough to be used as our objective set''' , ) parser.add_argument( '''--eval_freq''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''secondary model evaluation is triggered at eval_freq''' ) parser.add_argument('''--max_steps''' , default=1_000 , type=SCREAMING_SNAKE_CASE , help='''To calculate training epochs''' ) parser.add_argument( '''--secondary_learner_batch_size''' , default=128 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data for secondary learner''' , ) parser.add_argument( '''--batch_size''' , default=16 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data of language model(gpt2) ''' ) parser.add_argument( '''--eval_interval''' , default=10 , type=SCREAMING_SNAKE_CASE , help=( '''decay the selectivity of our secondary learner filter from''' '''1 standard deviation above average to 1 below average after 10 batches''' ) , ) parser.add_argument( '''--number''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''The number of examples split to be used as objective_set/test_data''' ) parser.add_argument( '''--min_len''' , default=1_026 , type=SCREAMING_SNAKE_CASE , help='''The minimum length of the article to be used as objective set''' ) parser.add_argument( '''--secondary_learner_max_epochs''' , default=15 , type=SCREAMING_SNAKE_CASE , help='''number of epochs to train secondary learner''' ) parser.add_argument('''--trim''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''truncate the example if it exceeds context length''' ) parser.add_argument( '''--threshold''' , default=1.0 , type=SCREAMING_SNAKE_CASE , help=( '''The threshold value used by secondary learner to filter the train_data and allow only''' ''' informative data as input to the model''' ) , ) parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=SCREAMING_SNAKE_CASE , help='''finetuned_model_name''' ) parser.add_argument( 
'''--recopy_model''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=SCREAMING_SNAKE_CASE , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ) # Load train data for secondary learner __UpperCamelCase :Optional[Any] = joblib.load('''data/IGF_values.jbl''' ) # Train secondary learner __UpperCamelCase :str = training_secondary_learner( SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ) # load pretrained gpt2 model __UpperCamelCase :Union[str, Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __UpperCamelCase , __UpperCamelCase :Dict = generate_datasets( context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE , secondary_learner=SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ) if __name__ == "__main__": main()
43
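The core filtering rule in the fine-tuning loop above reduces to a step-decayed threshold on the secondary learner's predicted information gain; a minimal sketch with illustrative names:

def keep_context(predicted_q, global_step, warmup_steps=10):
    # Selective at first (threshold 1.0, one standard deviation above average in the
    # script's terms), then relaxed to -1.0 once the warm-up ends, as in the loop above.
    threshold = 1.0 if global_step < warmup_steps else -1.0
    return predicted_q >= threshold

assert keep_context(0.5, global_step=3) is False
assert keep_context(0.5, global_step=20) is True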
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") requires_backends(self , "vision") self.check_model_type(UpperCAmelCase_) def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]: '''simple docstring''' if "text_queries" in kwargs: lowerCamelCase__: Any =kwargs.pop("text_queries") if isinstance(UpperCAmelCase_ , (str, Image.Image)): lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels} else: lowerCamelCase__: Any =image lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) return results def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' lowerCamelCase__: List[str] ={} if "threshold" in kwargs: lowerCamelCase__: List[Any] =kwargs["threshold"] if "top_k" in kwargs: lowerCamelCase__: Any =kwargs["top_k"] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] =load_image(inputs["image"]) lowerCamelCase__: Dict =inputs["candidate_labels"] if isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Any =candidate_labels.split(",") lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(UpperCAmelCase_): lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework) lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework) yield { "is_last": i == len(UpperCAmelCase_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model_inputs.pop("target_size") lowerCamelCase__: Dict =model_inputs.pop("candidate_label") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple: '''simple docstring''' lowerCamelCase__: Union[str, Any] =[] for model_output in model_outputs: lowerCamelCase__: Optional[Any] =model_output["candidate_label"] lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_) lowerCamelCase__: Dict =self.image_processor.post_process_object_detection( 
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0] for index in outputs["scores"].nonzero(): lowerCamelCase__: Dict =outputs["scores"][index].item() lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0]) lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box} results.append(UpperCAmelCase_) lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_: x["score"] , reverse=UpperCAmelCase_) if top_k: lowerCamelCase__: Dict =results[:top_k] return results def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist() lowerCamelCase__: Optional[int] ={ "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
10
0
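A hedged usage sketch for the zero-shot object detection pipeline above; the checkpoint name is an assumption (any OWL-ViT-style checkpoint should work) and running it downloads the model:

from transformers import pipeline

# Hypothetical checkpoint choice; substitute any zero-shot object detection model.
detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])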
def a__ ( _SCREAMING_SNAKE_CASE : int = 3 , _SCREAMING_SNAKE_CASE : int = 7 , _SCREAMING_SNAKE_CASE : int = 1_00_00_00 ) -> int: """simple docstring""" UpperCAmelCase_ : Any = 0 UpperCAmelCase_ : str = 1 for current_denominator in range(1 , limit + 1 ): UpperCAmelCase_ : Tuple = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: UpperCAmelCase_ : Optional[int] = current_numerator UpperCAmelCase_ : Tuple = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=100_0000))
356
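The fraction search above scans every denominator d <= limit for the largest n/d strictly below numerator/denominator. A de-obfuscated sketch, checked against the problem statement's small case (for d <= 8 the fraction immediately left of 3/7 is 2/5):

def left_neighbor_numerator(numerator=3, denominator=7, limit=10**6):
    best_num, best_den = 0, 1
    for d in range(1, limit + 1):
        n = d * numerator // denominator
        if d % denominator == 0:  # n/d would equal the target; step one left
            n -= 1
        if n * best_den > d * best_num:  # compare n/d > best_num/best_den exactly
            best_num, best_den = n, d
    return best_num

assert left_neighbor_numerator(limit=8) == 2  # 2/5, per the problem statement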
'''simple docstring''' from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder _lowerCamelCase = datasets.utils.logging.get_logger(__name__) class _snake_case (folder_based_builder.FolderBasedBuilderConfig): __A : bool =None __A : bool =None class _snake_case (folder_based_builder.FolderBasedBuilder): __A : Union[str, Any] =datasets.Audio() __A : Optional[int] ="audio" __A : Any =AudioFolderConfig __A : List[str] # definition at the bottom of the script __A : Optional[int] =AudioClassification(audio_column="audio" , label_column="label") _lowerCamelCase = [ """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""", ] _lowerCamelCase = AUDIO_EXTENSIONS
67
0
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class lowerCAmelCase__ : """simple docstring""" def __init__( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = {} def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : float ) -> None: """simple docstring""" if nodea not in self.connections: self.add_node(__SCREAMING_SNAKE_CASE ) if nodea not in self.connections: self.add_node(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = probability def UpperCAmelCase__ ( self : str ) -> list[str]: """simple docstring""" return list(self.connections ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(a__ , a__ , a__ ) __SCREAMING_SNAKE_CASE = Counter(graph.get_nodes() ) __SCREAMING_SNAKE_CASE = start for _ in range(a__ ): __SCREAMING_SNAKE_CASE = graph.transition(a__ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
267
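A hedged usage sketch of the random-walk idea in the Markov chain class above, condensed into one standalone function (per-node transition probabilities are assumed to sum to 1; names are mine):

from collections import Counter
from random import random

def run_chain(connections, start, steps):
    visited = Counter()
    node = start
    for _ in range(steps):
        cumulative, roll = 0.0, random()
        for dest, probability in connections[node].items():
            cumulative += probability
            if cumulative > roll:
                node = dest
                break
        visited[node] += 1
    return visited

transitions = {"a": {"a": 0.9, "b": 0.1}, "b": {"a": 0.5, "b": 0.5}}
print(run_chain(transitions, "a", 5_000))  # "a" should dominate given its 0.9 self-loop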
'''simple docstring''' def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = int(a__ ) # Initialize Result __SCREAMING_SNAKE_CASE = [] # Traverse through all denomination for denomination in reversed(a__ ): # Find denominations while int(a__ ) >= int(a__ ): total_value -= int(a__ ) answer.append(a__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase : Dict = [] UpperCAmelCase : List[str] = '0' if ( input('Do you want to enter your denominations ? (yY/n): ').strip().lower() == "y" ): UpperCAmelCase : List[str] = int(input('Enter the number of denominations you want to add: ').strip()) for i in range(0, n): denominations.append(int(input(f"""Denomination {i}: """).strip())) UpperCAmelCase : str = input('Enter the change you want to make in Indian Currency: ').strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase : int = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] UpperCAmelCase : Any = input('Enter the change you want to make: ').strip() if int(value) == 0 or int(value) < 0: print('The total value cannot be zero or negative.') else: print(f"""Following is minimal change for {value}: """) UpperCAmelCase : Any = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=' ')
267
1
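A worked run of the greedy change-maker above on the default Indian denominations; this sketch sorts explicitly instead of relying on an ascending input list the way reversed() does:

def find_minimum_change(denominations, value):
    total = int(value)
    answer = []
    for coin in sorted(denominations, reverse=True):
        while total >= coin:
            total -= coin
            answer.append(coin)
    return answer

assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]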
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { 'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class lowercase_ ( A ): """simple docstring""" lowerCamelCase_ = '''poolformer''' def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Union[str, Any]=1_6 , __lowerCamelCase : Any=1_6 , __lowerCamelCase : int=3 , __lowerCamelCase : Optional[int]=4.0 , __lowerCamelCase : Optional[int]=[2, 2, 6, 2] , __lowerCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __lowerCamelCase : str=[7, 3, 3, 3] , __lowerCamelCase : str=[4, 2, 2, 2] , __lowerCamelCase : str=[2, 1, 1, 1] , __lowerCamelCase : Any=4 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=1e-5 , __lowerCamelCase : List[str]=0.0_2 , **__lowerCamelCase : Dict , ): """simple docstring""" _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = patch_size _SCREAMING_SNAKE_CASE = stride _SCREAMING_SNAKE_CASE = padding _SCREAMING_SNAKE_CASE = pool_size _SCREAMING_SNAKE_CASE = hidden_sizes _SCREAMING_SNAKE_CASE = mlp_ratio _SCREAMING_SNAKE_CASE = depths _SCREAMING_SNAKE_CASE = patch_sizes _SCREAMING_SNAKE_CASE = strides _SCREAMING_SNAKE_CASE = num_encoder_blocks _SCREAMING_SNAKE_CASE = drop_path_rate _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = use_layer_scale _SCREAMING_SNAKE_CASE = layer_scale_init_value _SCREAMING_SNAKE_CASE = initializer_range super().__init__(**__lowerCamelCase ) class lowercase_ ( A ): """simple docstring""" lowerCamelCase_ = version.parse('''1.11''' ) @property def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" return 2e-3
111
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowercase_ ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) ) def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) ) def lowerCAmelCase_ ( self : List[str] ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) ) def lowerCAmelCase_ ( self : int ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) ) def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _SCREAMING_SNAKE_CASE = "fp16" self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) ) def lowerCAmelCase_ ( self : Any ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _SCREAMING_SNAKE_CASE = "fp16" self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) ) def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" # pass variant but use the non-variant filenames _SCREAMING_SNAKE_CASE = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] _SCREAMING_SNAKE_CASE = "fp16" self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) ) def lowerCAmelCase_ ( self : int ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", 
"unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _SCREAMING_SNAKE_CASE = "fp16" self.assertFalse(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] _SCREAMING_SNAKE_CASE = "fp16" self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) ) def lowerCAmelCase_ ( self : str ): """simple docstring""" # pass variant but use the non-variant filenames _SCREAMING_SNAKE_CASE = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] _SCREAMING_SNAKE_CASE = "fp16" self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _SCREAMING_SNAKE_CASE = "fp16" self.assertFalse(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
111
1
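Every test above exercises one invariant: a weight set is safetensors-compatible when each component that ships a PyTorch .bin also ships a .safetensors file. A minimal sketch of that rule, comparing directory components only and ignoring the variant infixes the real helper also handles:

def is_safetensors_compatible(filenames):
    # Every directory that ships a ".bin" must also ship a ".safetensors".
    bin_dirs = {name.rsplit("/", 1)[0] for name in filenames if name.endswith(".bin")}
    safe_dirs = {name.rsplit("/", 1)[0] for name in filenames if name.endswith(".safetensors")}
    return bin_dirs <= safe_dirs

assert is_safetensors_compatible([
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
])
assert not is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])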
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" pass @nightly @require_onnxruntime @require_torch_gpu class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @property def _snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _snake_case ( self ): lowercase__: Any = ort.SessionOptions() lowercase__: Optional[Any] = False return options def _snake_case ( self ): lowercase__: int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowercase__: Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowercase__: Tuple = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: List[Any] = '''A red cat sitting on a park bench''' lowercase__: int = np.random.RandomState(0 ) lowercase__: List[str] = pipe( prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , ) lowercase__: Tuple = output.images lowercase__: int = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) lowercase__: Union[str, Any] = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case ( self ): lowercase__: Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowercase__: int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowercase__: Union[str, Any] = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) lowercase__: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Any = '''A red cat sitting on a park bench''' lowercase__: List[str] = np.random.RandomState(0 ) lowercase__: Dict = pipe( prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , ) lowercase__: Union[str, Any] = output.images lowercase__: Any = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) 
lowercase__: Union[str, Any] = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
177
"""simple docstring""" __A = [ (1_0_0_0, "M"), (9_0_0, "CM"), (5_0_0, "D"), (4_0_0, "CD"), (1_0_0, "C"), (9_0, "XC"), (5_0, "L"), (4_0, "XL"), (1_0, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"), ] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__: Dict = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0} lowercase__: List[str] = 0 lowercase__: List[Any] = 0 while place < len(__UpperCAmelCase ): if (place + 1 < len(__UpperCAmelCase )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: lowercase__: Optional[Any] = [] for arabic, roman in ROMAN: ((lowercase__), (lowercase__)): Tuple = divmod(__UpperCAmelCase , __UpperCAmelCase ) result.append(roman * factor ) if number == 0: break return "".join(__UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
177
1
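Cleaned-up sketches of both conversions above with a round-trip check (the tuple unpacking in the original int-to-roman path was mangled by the renaming):

ROMAN = [
    (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"), (90, "XC"),
    (50, "L"), (40, "XL"), (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
]

def int_to_roman(number):
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)

def roman_to_int(roman):
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total, place = 0, 0
    while place < len(roman):
        # A smaller value before a larger one means subtractive notation (IV, XC, ...).
        if place + 1 < len(roman) and vals[roman[place]] < vals[roman[place + 1]]:
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total

assert int_to_roman(3549) == "MMMDXLIX"
assert roman_to_int("MMMDXLIX") == 3549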
from collections.abc import Callable class a__ : def __init__( self , UpperCAmelCase = None ) -> None: # Stores actual heap items. __a = [] # Stores indexes of each item for supporting updates and deletion. __a = {} # Stores current size of heap. __a = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __a = key or (lambda UpperCAmelCase : x) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> int | None: return int((i - 1) / 2 ) if i > 0 else None def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> int | None: __a = int(2 * i + 1 ) return left if 0 < left < self.size else None def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> int | None: __a = int(2 * i + 2 ) return right if 0 < right < self.size else None def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> None: __a , __a = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __a , __a = self.arr[j], self.arr[i] def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> bool: return self.arr[i][1] < self.arr[j][1] def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> int: __a = self._left(UpperCAmelCase ) __a = self._right(UpperCAmelCase ) __a = i if left is not None and not self._cmp(UpperCAmelCase , UpperCAmelCase ): __a = left if right is not None and not self._cmp(UpperCAmelCase , UpperCAmelCase ): __a = right return valid_parent def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> None: __a = self._parent(UpperCAmelCase ) while parent is not None and not self._cmp(UpperCAmelCase , UpperCAmelCase ): self._swap(UpperCAmelCase , UpperCAmelCase ) __a , __a = parent, self._parent(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> None: __a = self._get_valid_parent(UpperCAmelCase ) while valid_parent != index: self._swap(UpperCAmelCase , UpperCAmelCase ) __a , __a = valid_parent, self._get_valid_parent(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> None: if item not in self.pos_map: return __a = self.pos_map[item] __a = [item, self.key(UpperCAmelCase )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(UpperCAmelCase ) self._heapify_down(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> None: if item not in self.pos_map: return __a = self.pos_map[item] del self.pos_map[item] __a = self.arr[self.size - 1] __a = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(UpperCAmelCase ) self._heapify_down(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> None: __a = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(UpperCAmelCase )] ) else: __a = [item, self.key(UpperCAmelCase )] __a = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __SCREAMING_SNAKE_CASE ( self ) -> tuple | None: return self.arr[0] if self.size else None def __SCREAMING_SNAKE_CASE ( self ) -> tuple | None: __a = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def lowerCAmelCase( ): pass if __name__ == "__main__": import doctest doctest.testmod()
353
from ..utils import DummyObject, requires_backends class a__ ( metaclass=__snake_case ): A__ : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Dict: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Dict = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Dict = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> str: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class a__ ( metaclass=__snake_case ): A__ : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int: requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]: requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(cls , ['torch', 'transformers', 'onnx'] )
197
0
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = """new-model""" if is_tf_available(): class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : str = NewModelConfig @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' A: int = '''bert-base-cased''' A: Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : str ) -> str: '''simple docstring''' A: Tuple = '''bert-base-cased''' A: str = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[int] = TFAutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: Optional[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) A , A: Dict = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) 
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : str ) -> List[Any]: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[Any] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: str = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) A , A: int = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: str = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) A , A: Any = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' for model_name in ["bert-base-uncased"]: A: Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = TFAutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Optional[int] ) -> Any: '''simple docstring''' for model_name in ["bert-base-uncased"]: A: Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow @require_tensorflow_probability def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: A: int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE_ ) A , A: Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained( SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( 
self : Optional[Any] ) -> Any: '''simple docstring''' A: Optional[Any] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE_ ) , 1_44_10 ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' A: List[str] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE_ ) , 1_44_10 ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A: str = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: int = copy.deepcopy(model.config ) A: Optional[Any] = ['''FunnelBaseModel'''] A: Union[str, Any] = TFAutoModel.from_config(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Any = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' try: AutoConfig.register('''new-model''' , SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE_ ): auto_class.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) auto_class.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE_ ): auto_class.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now that the config is registered, it can be used as any other config with the auto-API A: Optional[int] = BertModelTester(self ).get_config() A: List[str] = NewModelConfig(**tiny_config.to_dict() ) A: Any = auto_class.from_config(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Dict = auto_class.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def _snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , '''bert-base is not a local folder and is not a valid model identifier''' ): A: List[Any] = TFAutoModel.from_pretrained('''bert-base''' ) def _snake_case ( self : Union[str, 
Any] ) -> List[Any]: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A: int = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ , revision='''aaaaaa''' ) def _snake_case ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): A: Dict = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def _snake_case ( self : Union[str, Any] ) -> int: '''simple docstring''' with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , '''Use `from_pt=True` to load this model''' ): A: Optional[int] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def _snake_case ( self : Dict ) -> int: '''simple docstring''' A: Optional[int] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: A: Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint A: List[str] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: A: str = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
319
'''simple docstring'''


def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element to
    the end of the unsorted prefix, then recurses on the shorter prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
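A minimal sanity check of the recursive bubble sort above (editorial addition; the input values are illustrative):

    data = [5, 1, 4, 2, 8]
    assert bubble_sort(data) == [1, 2, 4, 5, 8]  # sorts in place and returns the list
    assert bubble_sort([]) == []                 # empty input makes no pass and no recursion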
319
1
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_2 , snake_case_=2 , snake_case_=3 , snake_case_=1_6 , snake_case_=[1, 2, 1] , snake_case_=[2, 2, 4] , snake_case_=2 , snake_case_=2.0 , snake_case_=True , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_="gelu" , snake_case_=False , snake_case_=True , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=1_0 , snake_case_=8 , snake_case_=["stage1", "stage2", "stage3"] , snake_case_=[1, 2, 3] , ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : Any = batch_size UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : int = patch_size UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : Optional[int] = embed_dim UpperCAmelCase_ : Optional[Any] = depths UpperCAmelCase_ : Union[str, Any] = num_heads UpperCAmelCase_ : List[Any] = window_size UpperCAmelCase_ : str = mlp_ratio UpperCAmelCase_ : Union[str, Any] = qkv_bias UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : List[str] = drop_path_rate UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : Union[str, Any] = use_absolute_embeddings UpperCAmelCase_ : List[str] = patch_norm UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Union[str, Any] = is_training UpperCAmelCase_ : int = scope UpperCAmelCase_ : Optional[int] = use_labels UpperCAmelCase_ : Tuple = type_sequence_label_size UpperCAmelCase_ : Tuple = encoder_stride UpperCAmelCase_ : List[Any] = out_features UpperCAmelCase_ : Union[str, Any] = out_indices def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Dict = None if self.use_labels: UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : str = self.get_config() return config, pixel_values, labels def _UpperCamelCase ( self ): '''simple docstring''' return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , 
encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : Dict = MaskFormerSwinModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(snake_case_ ) UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : Any = MaskFormerSwinBackbone(config=snake_case_ ) model.to(snake_case_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(snake_case_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(snake_case_ ): UpperCAmelCase_ : str = ['stem'] UpperCAmelCase_ : Tuple = MaskFormerSwinBackbone(config=snake_case_ ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : int = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs UpperCAmelCase_ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ :Any = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase_ :Optional[int] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase_ :Optional[int] = False lowerCamelCase_ :Optional[int] = False lowerCamelCase_ :List[str] = False lowerCamelCase_ :Dict = False lowerCamelCase_ :str = False def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = MaskFormerSwinModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=snake_case_ , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def _UpperCamelCase ( self ): '''simple docstring''' pass def _UpperCamelCase ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCamelCase ( self ): '''simple docstring''' return def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_backbone(*snake_case_ ) @unittest.skip('Swin does not use inputs_embeds' ) def _UpperCamelCase ( self ): '''simple docstring''' pass @unittest.skip('Swin does not support feedforward chunking' ) def _UpperCamelCase ( self ): '''simple docstring''' pass def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(snake_case_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(snake_case_ ) UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase_ : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case_ ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def _UpperCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def _UpperCamelCase ( self ): '''simple docstring''' pass def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : str = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) UpperCAmelCase_ : Dict = outputs.hidden_states UpperCAmelCase_ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case_ ) , snake_case_ ) # Swin has a different seq_length UpperCAmelCase_ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = True self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[Any] = True self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : str = 3 UpperCAmelCase_ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , 
collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[int] = True self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Tuple = True self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def _UpperCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCamelCase ( self ): '''simple docstring''' pass def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case_ ): UpperCAmelCase_ : Union[str, Any] = 0 return t def check_equivalence(snake_case_ , snake_case_ , snake_case_ , snake_case_={} ): with torch.no_grad(): UpperCAmelCase_ : Any = model(**snake_case_ , return_dict=snake_case_ , **snake_case_ ) UpperCAmelCase_ : List[str] = model(**snake_case_ , return_dict=snake_case_ , **snake_case_ ).to_tuple() def recursive_check(snake_case_ , snake_case_ ): if isinstance(snake_case_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case_ , snake_case_ ): recursive_check(snake_case_ , snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case_ , snake_case_ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case_ ) , set_nan_tensor_to_zero(snake_case_ ) , atol=1E-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(snake_case_ ).any()} and `inf`: {torch.isinf(snake_case_ )}. 
Dict has''' F''' `nan`: {torch.isnan(snake_case_ ).any()} and `inf`: {torch.isinf(snake_case_ )}.''' ) , ) recursive_check(snake_case_ , snake_case_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : List[str] = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() UpperCAmelCase_ : int = self._prepare_for_class(snake_case_ , snake_case_ ) UpperCAmelCase_ : str = self._prepare_for_class(snake_case_ , snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ : List[str] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) UpperCAmelCase_ : Optional[int] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ : Dict = self._prepare_for_class(snake_case_ , snake_case_ ) UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(snake_case_ , snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ , {'output_hidden_states': True} ) UpperCAmelCase_ : str = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) UpperCAmelCase_ : List[str] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ , {'output_hidden_states': True} ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase , lowerCamelCase_ ): '''simple docstring''' lowerCamelCase_ :Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase_ :List[Any] = MaskFormerSwinConfig def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : List[str] = MaskFormerSwinModelTester(self ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = backbone_class(snake_case_ ) backbone.to(snake_case_ ) backbone.eval() UpperCAmelCase_ : Any = backbone(**snake_case_ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case_ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True UpperCAmelCase_ : List[str] = backbone(**snake_case_ , output_hidden_states=snake_case_ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: UpperCAmelCase_ : Union[str, Any] = backbone(**snake_case_ , output_attentions=snake_case_ ) self.assertIsNotNone(outputs.attentions )
274
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : Optional[Any] = { '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ): '''simple docstring''' lowerCamelCase_ :List[str] = '''autoformer''' lowerCamelCase_ :Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , snake_case_ = None , snake_case_ = None , snake_case_ = "student_t" , snake_case_ = "nll" , snake_case_ = 1 , snake_case_ = [1, 2, 3, 4, 5, 6, 7] , snake_case_ = True , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = 6_4 , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = 3_2 , snake_case_ = 3_2 , snake_case_ = "gelu" , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 1_0_0 , snake_case_ = 0.02 , snake_case_ = True , snake_case_=True , snake_case_ = 1_0 , snake_case_ = 2_5 , snake_case_ = 3 , **snake_case_ , ): '''simple docstring''' UpperCAmelCase_ : List[Any] = prediction_length UpperCAmelCase_ : List[str] = context_length if context_length is not None else prediction_length UpperCAmelCase_ : Optional[int] = distribution_output UpperCAmelCase_ : Optional[int] = loss UpperCAmelCase_ : Union[str, Any] = input_size UpperCAmelCase_ : int = num_time_features UpperCAmelCase_ : List[str] = lags_sequence UpperCAmelCase_ : Any = scaling UpperCAmelCase_ : Any = num_dynamic_real_features UpperCAmelCase_ : int = num_static_real_features UpperCAmelCase_ : Optional[Any] = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(snake_case_ ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) UpperCAmelCase_ : List[Any] = cardinality else: UpperCAmelCase_ : Optional[int] = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(snake_case_ ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) UpperCAmelCase_ : List[str] = embedding_dimension else: UpperCAmelCase_ : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] UpperCAmelCase_ : List[str] = num_parallel_samples # Transformer architecture configuration UpperCAmelCase_ : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features UpperCAmelCase_ : str = d_model UpperCAmelCase_ : str = encoder_attention_heads UpperCAmelCase_ : str = decoder_attention_heads UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : str = decoder_ffn_dim UpperCAmelCase_ : str = encoder_layers UpperCAmelCase_ : str = decoder_layers UpperCAmelCase_ : str = dropout UpperCAmelCase_ : Optional[int] = attention_dropout UpperCAmelCase_ : Tuple = activation_dropout UpperCAmelCase_ : Any = encoder_layerdrop UpperCAmelCase_ : Tuple = decoder_layerdrop UpperCAmelCase_ : List[str] = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : Union[str, Any] = use_cache # Autoformer UpperCAmelCase_ : Any = 
label_length UpperCAmelCase_ : Union[str, Any] = moving_average UpperCAmelCase_ : Tuple = autocorrelation_factor super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ ) @property def _UpperCamelCase ( self ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
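For orientation, the configuration class above mirrors `AutoformerConfig` from transformers (the obfuscated parameter names prevent keyword use on this copy); a hypothetical instantiation against the real class, with purely illustrative values, would look like:

    from transformers import AutoformerConfig

    config = AutoformerConfig(
        prediction_length=24,      # forecast horizon
        context_length=48,         # lookback window fed to the encoder
        moving_average=25,         # kernel size of the series-decomposition block
        autocorrelation_factor=3,  # top-k factor for auto-correlation attention
    )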
274
1
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Dict ) ->Tuple: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer lowerCamelCase__ : Tuple =flax_key_tuple[:-1] + ('weight',) lowerCamelCase__ : int =torch.permute(lowerCamelCase_ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCamelCase_ ): # linear layer lowerCamelCase__ : Any =flax_key_tuple[:-1] + ('weight',) lowerCamelCase__ : int =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase__ : List[str] =flax_key_tuple[:-1] + ('weight',) return flax_key_tuple, flax_tensor def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : int ) ->List[str]: if "metadata" in layer: lowerCamelCase__ : Any =layer.split('metadata' ) lowerCamelCase__ : Union[str, Any] =''.join(split_layer[0] )[:-1] lowerCamelCase__ : List[str] =[tuple(('metadata' + split_layer[1]).split('/' ) )] elif "kvstore" in layer: lowerCamelCase__ : str =layer.split('kvstore' ) lowerCamelCase__ : Union[str, Any] =''.join(split_layer[0] )[:-1] lowerCamelCase__ : Optional[Any] =[tuple(('kvstore' + split_layer[1]).split('/' ) )] else: lowerCamelCase__ : int =layer.split('/' ) lowerCamelCase__ : Tuple ='/'.join(split_layer[:-1] ) lowerCamelCase__ : Optional[Any] =(split_layer[-1],) if "kvstore/path" in layer: lowerCamelCase__ : str =f"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: lowerCamelCase__ : List[str] ='file' else: lowerCamelCase__ : Optional[int] =checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : int ) ->Dict: lowerCamelCase__ : Tuple =rename_keys(lowerCamelCase_ ) lowerCamelCase__ : List[str] ={} for k, v in current_block.items(): lowerCamelCase__ : Dict =v lowerCamelCase__ : Any =new_current_block torch.save(lowerCamelCase_ , lowerCamelCase_ ) def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Tuple = WEIGHTS_NAME ) ->Dict: lowerCamelCase__ : Optional[Any] =convert_file_size_to_int(lowerCamelCase_ ) lowerCamelCase__ : Any =[] lowerCamelCase__ : Optional[Any] ={} lowerCamelCase__ : Any =0 lowerCamelCase__ : str =0 os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp: lowerCamelCase__ : Union[str, Any] =serialization.msgpack_restore(fp.read() )['optimizer']['target'] lowerCamelCase__ : Optional[int] =flatten_dict(lowerCamelCase_ , sep='/' ) lowerCamelCase__ : str ={} for layer in checkpoint_info.keys(): lowerCamelCase__ : List[Any] =get_key_and_tensorstore_dict( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) if curr_real_layer_name in all_layers: lowerCamelCase__ : List[Any] =content else: lowerCamelCase__ : Dict ={split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file lowerCamelCase__ : Dict 
=ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() lowerCamelCase__ : Tuple =torch.tensor(lowerCamelCase_ ) lowerCamelCase__ : Dict =raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts lowerCamelCase__ : str =rename_base_flax_keys(tuple(key.split('/' ) ) , lowerCamelCase_ ) lowerCamelCase__ : Any ='/'.join(lowerCamelCase_ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: lowerCamelCase__ : List[str] =os.path.join( lowerCamelCase_ , weights_name.replace('.bin' , f"""-{len(lowerCamelCase_ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ ) sharded_state_dicts.append(current_block.keys() ) del current_block lowerCamelCase__ : Any ={} lowerCamelCase__ : str =0 lowerCamelCase__ : List[str] =raw_weights.to(getattr(lowerCamelCase_ , lowerCamelCase_ ) ) current_block_size += weight_size total_size += weight_size # Add the last block lowerCamelCase__ : List[Any] =os.path.join(lowerCamelCase_ , weights_name.replace('.bin' , f"""-{len(lowerCamelCase_ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(lowerCamelCase_ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index lowerCamelCase__ : int ={} lowerCamelCase__ : Optional[Any] ={} for idx, shard in enumerate(lowerCamelCase_ ): lowerCamelCase__ : str =weights_name.replace( '.bin' , f"""-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin""" ) # len(sharded_state_dicts):05d} lowerCamelCase__ : List[str] =os.path.join(lowerCamelCase_ , weights_name.replace('.bin' , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) ) lowerCamelCase__ : int =shard for key in shard: lowerCamelCase__ : Optional[int] =shard_file # Add the metadata lowerCamelCase__ : Any ={'total_size': total_size} lowerCamelCase__ : Tuple ={'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , 'w' , encoding='utf-8' ) as f: lowerCamelCase__ : Tuple =json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + '\n' f.write(lowerCamelCase_ ) return metadata, index if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. 
Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) lowerCAmelCase = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowerCAmelCase_ ( ) ->str: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer lowerCamelCase__ : Optional[Any] =SwitchTransformersConfig.from_pretrained('google/switch-base-8' ) config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' ) lowerCamelCase__ : str =SwitchTransformersForConditionalGeneration.from_pretrained( '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' ) lowerCamelCase__ : str =TaTokenizer.from_pretrained('t5-small' ) lowerCamelCase__ : Tuple ='A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.' lowerCamelCase__ : Optional[int] =tokenizer(lowerCamelCase_ , return_tensors='pt' ).input_ids lowerCamelCase__ : Tuple =model.generate(lowerCamelCase_ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
126
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCamelCase( _a, unittest.TestCase ): lowercase_ : Union[str, Any] = ConsistencyModelPipeline lowercase_ : Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowercase_ : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt lowercase_ : List[str] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Tuple = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test', subfolder='test_unet', ) return unet @property def UpperCamelCase ( self) -> List[Any]: """simple docstring""" _lowercase : Tuple = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test', subfolder='test_unet_class_cond', ) return unet def UpperCamelCase ( self, lowerCamelCase=False) -> Dict: """simple docstring""" if class_cond: _lowercase : Union[str, Any] = self.dummy_cond_unet else: _lowercase : Union[str, Any] = self.dummy_uncond_unet # Default to CM multistep sampler _lowercase : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, ) _lowercase : Optional[Any] = { 'unet': unet, 'scheduler': scheduler, } return components def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Tuple: """simple docstring""" if str(lowerCamelCase).startswith('mps'): _lowercase : str = torch.manual_seed(lowerCamelCase) else: _lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase) _lowercase : Tuple = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowercase : Optional[int] = self.get_dummy_components() _lowercase : str = ConsistencyModelPipeline(**lowerCamelCase) _lowercase : Dict = pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase) _lowercase : Optional[int] = pipe(**lowerCamelCase).images assert image.shape == (1, 32, 32, 3) _lowercase : int = image[0, -3:, -3:, -1] _lowercase : Dict = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase) _lowercase : Any = ConsistencyModelPipeline(**lowerCamelCase) _lowercase : str = pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : Any = self.get_dummy_inputs(lowerCamelCase) _lowercase : Any = 0 
_lowercase : List[str] = pipe(**lowerCamelCase).images assert image.shape == (1, 32, 32, 3) _lowercase : Any = image[0, -3:, -3:, -1] _lowercase : Union[str, Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowercase : Any = self.get_dummy_components() _lowercase : Optional[Any] = ConsistencyModelPipeline(**lowerCamelCase) _lowercase : List[str] = pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : Optional[Any] = self.get_dummy_inputs(lowerCamelCase) _lowercase : Union[str, Any] = 1 _lowercase : Tuple = None _lowercase : Tuple = pipe(**lowerCamelCase).images assert image.shape == (1, 32, 32, 3) _lowercase : str = image[0, -3:, -3:, -1] _lowercase : List[str] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def UpperCamelCase ( self) -> str: """simple docstring""" _lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowercase : Dict = self.get_dummy_components(class_cond=lowerCamelCase) _lowercase : Dict = ConsistencyModelPipeline(**lowerCamelCase) _lowercase : Optional[Any] = pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : Tuple = self.get_dummy_inputs(lowerCamelCase) _lowercase : Tuple = 1 _lowercase : int = None _lowercase : Tuple = 0 _lowercase : Dict = pipe(**lowerCamelCase).images assert image.shape == (1, 32, 32, 3) _lowercase : List[str] = image[0, -3:, -3:, -1] _lowercase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 @slow @require_torch_gpu class _lowerCamelCase( unittest.TestCase ): def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase=False, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Optional[Any]: """simple docstring""" _lowercase : List[Any] = torch.manual_seed(lowerCamelCase) _lowercase : str = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: _lowercase : Optional[Any] = self.get_fixed_latents(seed=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase, shape=lowerCamelCase) _lowercase : Tuple = latents return inputs def UpperCamelCase ( self, lowerCamelCase=0, lowerCamelCase="cpu", lowerCamelCase=torch.floataa, lowerCamelCase=(1, 3, 64, 64)) -> Any: """simple docstring""" if type(lowerCamelCase) == str: _lowercase : Union[str, Any] = torch.device(lowerCamelCase) _lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase) _lowercase : List[str] = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase) return latents def UpperCamelCase ( self) -> str: """simple docstring""" _lowercase : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2') _lowercase : Optional[int] = CMStochasticIterativeScheduler( 
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, ) _lowercase : Any = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase) pipe.to(torch_device=lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : str = self.get_inputs() _lowercase : Optional[int] = pipe(**lowerCamelCase).images assert image.shape == (1, 64, 64, 3) _lowercase : str = image[0, -3:, -3:, -1] _lowercase : Optional[Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2 def UpperCamelCase ( self) -> str: """simple docstring""" _lowercase : List[str] = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2') _lowercase : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, ) _lowercase : Union[str, Any] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase) pipe.to(torch_device=lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : List[Any] = self.get_inputs() _lowercase : int = 1 _lowercase : Optional[Any] = None _lowercase : str = pipe(**lowerCamelCase).images assert image.shape == (1, 64, 64, 3) _lowercase : List[Any] = image[0, -3:, -3:, -1] _lowercase : List[str] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2 @require_torch_a def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : str = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2') _lowercase : Optional[int] = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, ) _lowercase : Optional[int] = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase) pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : Any = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase): _lowercase : Dict = pipe(**lowerCamelCase).images assert image.shape == (1, 64, 64, 3) _lowercase : Any = image[0, -3:, -3:, -1] _lowercase : Union[str, Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 @require_torch_a def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2') _lowercase : Optional[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0, ) _lowercase : int = ConsistencyModelPipeline(unet=lowerCamelCase, scheduler=lowerCamelCase) pipe.to(torch_device=lowerCamelCase, torch_dtype=torch.floataa) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : List[Any] = self.get_inputs(get_fixed_latents=lowerCamelCase, device=lowerCamelCase) _lowercase : int = 1 _lowercase : str = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=lowerCamelCase, enable_math=lowerCamelCase, enable_mem_efficient=lowerCamelCase): _lowercase : Union[str, Any] 
= pipe(**lowerCamelCase).images assert image.shape == (1, 64, 64, 3) _lowercase : Any = image[0, -3:, -3:, -1] _lowercase : int = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
21
0
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # V2 examples may be unanswerable, so they need the dedicated processor.
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
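# A minimal usage sketch (editor's addition) for the classes above. The
# checkpoint name and data directory are illustrative assumptions; the data
# dir must contain the SQuAD train/dev .json files for this to run.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    squad_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
    train_dataset = SquadDataset(squad_args, tokenizer=tokenizer, mode=Split.train)
    print(len(train_dataset), sorted(train_dataset[0].keys()))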
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class _lowercase ( unittest.TestCase ): def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : Any = tempfile.mkdtemp() lowerCamelCase__ : Any = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """的""", """价""", """格""", """是""", """15""", """便""", """alex""", """##andra""", """,""", """。""", """-""", """t""", """shirt""", ] lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCamelCase__ : Union[str, Any] = { """do_resize""": True, """size""": {"""height""": 224, """width""": 224}, """do_center_crop""": True, """crop_size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073], """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711], """do_convert_rgb""": True, } lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , UpperCamelCase__ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: Optional[int] ): return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: Any , **UpperCamelCase__: List[str] ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , **UpperCamelCase__: int ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Tuple = self.get_tokenizer() lowerCamelCase__ : Any = self.get_rust_tokenizer() lowerCamelCase__ : str = self.get_image_processor() lowerCamelCase__ : str = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) lowerCamelCase__ : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ ) lowerCamelCase__ : Any = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) lowerCamelCase__ : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ ) 
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Optional[int] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ : str = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" ) lowerCamelCase__ : Optional[int] = self.get_image_processor(do_normalize=UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=UpperCamelCase__ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Optional[Any] = self.get_image_processor() lowerCamelCase__ : Optional[int] = self.get_tokenizer() lowerCamelCase__ : int = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase__ : Tuple = self.prepare_image_inputs() lowerCamelCase__ : str = image_processor(UpperCamelCase__ , return_tensors="""np""" ) lowerCamelCase__ : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : int = self.get_image_processor() lowerCamelCase__ : Optional[int] = self.get_tokenizer() lowerCamelCase__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = """Alexandra,T-shirt的价格是15便士。""" lowerCamelCase__ : Optional[Any] = processor(text=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = tokenizer(UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Optional[int] = self.get_image_processor() lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase__ : Dict = """Alexandra,T-shirt的价格是15便士。""" lowerCamelCase__ : Any = self.prepare_image_inputs() lowerCamelCase__ : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Dict = self.get_image_processor() lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 
1, 1, 8, 9]] lowerCamelCase__ : Any = processor.batch_decode(UpperCamelCase__ ) lowerCamelCase__ : int = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Dict = self.get_image_processor() lowerCamelCase__ : int = self.get_tokenizer() lowerCamelCase__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase__ : Optional[Any] = """Alexandra,T-shirt的价格是15便士。""" lowerCamelCase__ : Tuple = self.prepare_image_inputs() lowerCamelCase__ : str = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
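# A condensed sketch (editor's addition) of the end-to-end call the tests
# above verify: one processor call returns both the tokenizer and the image
# processor outputs. The checkpoint name is an assumption (a public
# Chinese-CLIP checkpoint) and fetching it requires network access.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids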
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def A ( self : str ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=UpperCamelCase__ , ) assert hasattr(self , 'env' ) def A ( self : Optional[Any] , UpperCamelCase__ : Any ): """simple docstring""" UpperCamelCase = { 'enabled': True, 'processes_per_host': 8, } UpperCamelCase = { 'enabled': True, 'parameters': { 'microbatches': 4, 'placement_strategy': 'spread', 'pipeline': 'interleaved', 'optimize': 'speed', 'partitions': 4, 'ddp': True, }, } UpperCamelCase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options} UpperCamelCase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer' # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={ **self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path, 'max_steps': 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='py36' , ) def A ( self : Optional[int] , UpperCamelCase__ : int ): """simple docstring""" TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def A ( self : List[str] , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = self.create_estimator(UpperCamelCase__ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= 
self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , UpperCamelCase__ )
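# Offline companion sketch (editor's addition): inspecting the metrics CSV
# that the export step above writes. The file name is hypothetical; the
# column names (metric_name, value) are the ones the test itself reads from
# the TrainingJobAnalytics dataframe.
import pandas as pd

df = pd.read_csv("my-training-job_metrics.csv")
eval_accuracy = df[df.metric_name == "eval_accuracy"]["value"].tolist()
print(max(eval_accuracy) if eval_accuracy else "no eval_accuracy rows found")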
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" _lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" _lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def A ( self : Union[str, Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False ): """simple docstring""" if rouge_types is None: UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase__ , use_stemmer=UpperCamelCase__ ) if use_aggregator: UpperCamelCase = scoring.BootstrapAggregator() else: UpperCamelCase = [] for ref, pred in zip(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = scorer.score(UpperCamelCase__ , UpperCamelCase__ ) if use_aggregator: aggregator.add_scores(UpperCamelCase__ ) else: scores.append(UpperCamelCase__ ) if use_aggregator: UpperCamelCase = aggregator.aggregate() else: UpperCamelCase = {} for key in scores[0]: UpperCamelCase = [score[key] for score in scores] return result
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all MVP models at https://huggingface.co/models?filter=mvp UpperCAmelCase__ : Union[str, Any] = { 'vocab_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json', }, 'added_tokens.json': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json', }, 'merges_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt', }, 'tokenizer_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json', }, } UpperCAmelCase__ : Any = { 'RUCAIBox/mvp': 1024, } class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : int = ['''input_ids''', '''attention_mask'''] __UpperCamelCase : int = MvpTokenizer def __init__( self : Optional[Any] , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Union[str, Any]="replace" , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : int="</s>" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Optional[int]="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : List[str]="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Optional[int]=True , **lowerCAmelCase_ : Any , ): """simple docstring""" super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) _A: Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowerCAmelCase_ ) != add_prefix_space: _A: str = getattr(lowerCAmelCase_ , pre_tok_state.pop('''type''' ) ) _A: List[str] = add_prefix_space _A: Optional[int] = pre_tok_class(**lowerCAmelCase_ ) _A: List[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _A: Union[str, Any] = '''post_processor''' _A: Tuple = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) if tokenizer_component_instance: _A: str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _A: int = tuple(state['''sep'''] ) if "cls" in state: _A: Any = tuple(state['''cls'''] ) _A: Union[str, Any] = False if state.get('''add_prefix_space''' , lowerCAmelCase_ ) != add_prefix_space: _A: Optional[Any] = add_prefix_space _A: str = True if state.get('''trim_offsets''' , lowerCAmelCase_ ) != trim_offsets: _A: str = trim_offsets _A: int = True if changes_to_apply: _A: Optional[int] = getattr(lowerCAmelCase_ , 
state.pop('''type''' ) ) _A: Union[str, Any] = component_class(**lowerCAmelCase_ ) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) @property def __magic_name__ ( self : Dict ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __magic_name__ ( self : Dict , lowerCAmelCase_ : int ): """simple docstring""" _A: Dict = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value _A: Optional[int] = value def __magic_name__ ( self : List[str] , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : List[str] ): """simple docstring""" _A: List[str] = kwargs.get('''is_split_into_words''' , lowerCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __magic_name__ ( self : int , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : List[str] ): """simple docstring""" _A: Union[str, Any] = kwargs.get('''is_split_into_words''' , lowerCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __magic_name__ ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ): """simple docstring""" _A: List[str] = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ ) def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int=None ): """simple docstring""" _A: Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __magic_name__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ): """simple docstring""" _A: Optional[int] = [self.sep_token_id] _A: List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
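# Illustrative check (editor's addition) of the special-token layout encoded
# by build_inputs_with_special_tokens above: single sequences become
# <s> A </s>; pairs become <s> A </s> </s> B </s> (BART-style). The ids used
# here are placeholders, not MVP's real vocabulary ids.
bos, eos = 0, 2
a_ids, b_ids = [10, 11], [20]
single = [bos] + a_ids + [eos]
pair = single + [eos] + b_ids + [eos]
print(single)  # [0, 10, 11, 2]
print(pair)    # [0, 10, 11, 2, 2, 20, 2]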
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class UpperCAmelCase : '''simple docstring''' __UpperCamelCase : Any = MBartConfig __UpperCamelCase : Tuple = {} __UpperCamelCase : Dict = '''gelu''' def __init__( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Union[str, Any]=9_9 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[str]=2_0 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : List[Any]=0 , ): """simple docstring""" _A: Union[str, Any] = parent _A: List[Any] = batch_size _A: Dict = seq_length _A: Dict = is_training _A: str = use_labels _A: int = vocab_size _A: str = hidden_size _A: Tuple = num_hidden_layers _A: Optional[Any] = num_attention_heads _A: Tuple = intermediate_size _A: int = hidden_dropout_prob _A: Tuple = attention_probs_dropout_prob _A: Tuple = max_position_embeddings _A: Dict = eos_token_id _A: int = pad_token_id _A: Any = bos_token_id def __magic_name__ ( self : Dict ): """simple docstring""" _A: Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) _A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A: int = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A: Any = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return config, inputs_dict def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ): """simple docstring""" _A: Tuple = TFMBartModel(config=lowerCAmelCase_ ).get_decoder() _A: List[str] = inputs_dict['''input_ids'''] _A: Tuple = input_ids[:1, :] _A: List[Any] = inputs_dict['''attention_mask'''][:1, :] _A: str = inputs_dict['''head_mask'''] _A: Optional[Any] = 1 # first forward pass _A: Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) _A , _A: List[str] = outputs.to_tuple() _A: Dict = past_key_values[1] def lowerCamelCase__ ( a , a , a , a=None , a=None 
, a=None , a=None , a=None , ) -> Tuple: if attention_mask is None: _A: Union[str, Any] = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A: Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A: Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A: Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase : Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () __UpperCamelCase : int = (TFMBartForConditionalGeneration,) if is_tf_available() else () __UpperCamelCase : Tuple = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) __UpperCamelCase : List[Any] = True __UpperCamelCase : int = False __UpperCamelCase : Optional[Any] = False def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ): """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def __magic_name__ ( self : Any ): """simple docstring""" _A: Dict = TFMBartModelTester(self ) _A: Tuple = ConfigTester(self , config_class=lowerCAmelCase_ ) def __magic_name__ ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[Any] ): """simple docstring""" _A: str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ ) @require_sentencepiece @require_tokenizers @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase : Optional[int] = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] __UpperCamelCase : List[str] = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] __UpperCamelCase : Union[str, Any] = '''facebook/mbart-large-en-ro''' @cached_property def __magic_name__ ( self : Tuple ): """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __magic_name__ ( self : str ): """simple docstring""" _A: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __magic_name__ ( self : Union[str, Any] , **lowerCAmelCase_ : Tuple ): """simple docstring""" _A: Optional[Any] = self.translate_src_text(**lowerCAmelCase_ ) self.assertListEqual(self.expected_text , lowerCAmelCase_ ) def __magic_name__ ( self : Dict , **lowerCAmelCase_ : Tuple ): """simple docstring""" _A: Any = self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='''tf''' ) _A: Any = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) _A: Optional[Any] = self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) return generated_words @slow def __magic_name__ ( self : List[str] ): """simple docstring""" self._assert_generated_batch_equal_expected()
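# Standalone illustration (editor's addition) of the decoder_attention_mask
# construction in prepare_mbart_inputs_dict above: the first (forced) decoder
# token is always attended; later positions are masked where they equal
# pad_token_id.
import tensorflow as tf

pad_token_id = 1
decoder_input_ids = tf.constant([[2, 5, 6, 1, 1]])
decoder_attention_mask = tf.concat(
    [
        tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], pad_token_id), tf.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask.numpy())  # [[1 1 1 0 0]]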
from __future__ import annotations

from copy import deepcopy


class FenwickTree:
    """Fenwick (binary indexed) tree supporting O(log n) prefix sums and updates."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        # Build the tree in O(n) by pushing each node's value up to its parent.
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        # Invert the O(n) construction to recover the original array.
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        # Point update: add `value` to the element at `index`.
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        # Point assignment: set the element at `index` to `value`.
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        # Sum of elements in [0, right).
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        # Sum of elements in [left, right).
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        # Largest index i with prefix(i + 1) <= value, or -1 if none exists.
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
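# Quick usage example (editor's addition) for the Fenwick tree above: prefix
# sums, range queries, and point updates all run in O(log n).
f = FenwickTree([1, 2, 3, 4, 5])
print(f.prefix(3))    # 1 + 2 + 3 = 6
print(f.query(1, 4))  # 2 + 3 + 4 = 9
f.add(2, 10)          # underlying array becomes [1, 2, 13, 4, 5]
print(f.get(2))       # 13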
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ): """simple docstring""" UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase__ = 3 UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) UpperCAmelCase__ = jieba UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" if self.remove_space: UpperCAmelCase__ = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ = inputs UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase ) UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: UpperCAmelCase__ = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase ) UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) UpperCAmelCase__ = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ = cur_pieces[1:] else: UpperCAmelCase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ): """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , 
already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
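# Standalone illustration (editor's addition) of the whitespace trick the
# tokenizer above sets up with str.maketrans: spaces and newlines are mapped
# to the printable placeholders \u2582/\u2583 before SentencePiece sees the
# text, then restored during decoding.
translator = str.maketrans(" \n", "\u2582\u2583")
text = "你好 世界\n再见"
encoded = text.translate(translator)
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
print(encoded)
print(decoded == text)  # True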
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy snake_case__ : str = logging.get_logger(__name__) class snake_case_( __SCREAMING_SNAKE_CASE ): def __init__( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : float , **UpperCamelCase_ : Any ): lowerCAmelCase : Optional[Any] = feature_size lowerCAmelCase : Dict = sampling_rate lowerCAmelCase : List[Any] = padding_value lowerCAmelCase : Dict = kwargs.pop('''padding_side''' , '''right''' ) lowerCAmelCase : Tuple = kwargs.pop('''return_attention_mask''' , UpperCamelCase__ ) super().__init__(**UpperCamelCase__ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[ BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]], ] , UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , ): if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): lowerCAmelCase : Any = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' F''' to this method that includes {self.model_input_names[0]}, but you provided''' F''' {list(processed_features.keys() )}''' ) lowerCAmelCase : Any = processed_features[self.model_input_names[0]] lowerCAmelCase : str = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(UpperCamelCase__ ) == 0: if return_attention_mask: lowerCAmelCase : Any = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch lowerCAmelCase : int = required_input[0] if isinstance(UpperCamelCase__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. lowerCAmelCase : int = 0 while len(required_input[index] ) == 0: index += 1 if index < len(UpperCamelCase__ ): lowerCAmelCase : Any = required_input[index][0] if return_tensors is None: if is_tf_tensor(UpperCamelCase__ ): lowerCAmelCase : Tuple = '''tf''' elif is_torch_tensor(UpperCamelCase__ ): lowerCAmelCase : Tuple = '''pt''' elif isinstance(UpperCamelCase__ , (int, float, list, tuple, np.ndarray) ): lowerCAmelCase : Any = '''np''' else: raise ValueError( F'''type of {first_element} unknown: {type(UpperCamelCase__ )}. 
''' '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): lowerCAmelCase : Dict = to_numpy(UpperCamelCase__ ) else: lowerCAmelCase : int = [to_numpy(UpperCamelCase__ ) for v in value] # Convert padding_strategy in PaddingStrategy lowerCAmelCase : Tuple = self._get_padding_strategies(padding=UpperCamelCase__ , max_length=UpperCamelCase__ ) lowerCAmelCase : Any = processed_features[self.model_input_names[0]] lowerCAmelCase : Dict = len(UpperCamelCase__ ) if not all(len(UpperCamelCase__ ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) lowerCAmelCase : Union[str, Any] = [] for i in range(UpperCamelCase__ ): lowerCAmelCase : Dict = {k: v[i] for k, v in processed_features.items()} # truncation lowerCAmelCase : int = self._truncate( UpperCamelCase__ , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , truncation=UpperCamelCase__ , ) truncated_inputs.append(UpperCamelCase__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length lowerCAmelCase : Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) lowerCAmelCase : int = PaddingStrategy.MAX_LENGTH lowerCAmelCase : int = {} for i in range(UpperCamelCase__ ): # padding lowerCAmelCase : Optional[Any] = self._pad( truncated_inputs[i] , max_length=UpperCamelCase__ , padding_strategy=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ) for key, value in outputs.items(): if key not in batch_outputs: lowerCAmelCase : str = [] if value.dtype is np.dtype(np.floataa ): lowerCAmelCase : Dict = value.astype(np.floataa ) batch_outputs[key].append(UpperCamelCase__ ) return BatchFeature(UpperCamelCase__ , tensor_type=UpperCamelCase__ ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Optional[int] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: lowerCAmelCase : Tuple = len(UpperCamelCase__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowerCAmelCase : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowerCAmelCase : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCamelCase__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: lowerCAmelCase : Optional[int] = np.ones(len(UpperCamelCase__ ) , dtype=np.intaa ) if needs_to_be_padded: lowerCAmelCase : Union[str, Any] = max_length - len(UpperCamelCase__ ) if self.padding_side == "right": if return_attention_mask: lowerCAmelCase : str = np.pad( processed_features['''attention_mask'''] , (0, difference) ) lowerCAmelCase : Tuple = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) lowerCAmelCase : Optional[int] = np.pad( UpperCamelCase__ , UpperCamelCase__ , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: lowerCAmelCase : Optional[int] = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) 
lowerCAmelCase : int = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) lowerCAmelCase : int = np.pad( UpperCamelCase__ , UpperCamelCase__ , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) lowerCAmelCase : Union[str, Any] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowerCAmelCase : List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowerCAmelCase : List[Any] = len(UpperCamelCase__ ) > max_length if needs_to_be_truncated: lowerCAmelCase : Any = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: lowerCAmelCase : List[str] = processed_features['''attention_mask'''][:max_length] return processed_features def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Optional[Any]=None ): if padding is not False: if padding is True: lowerCAmelCase : Tuple = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCAmelCase : Any = PaddingStrategy(UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCAmelCase : Any = padding else: lowerCAmelCase : List[Any] = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
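# Minimal numpy illustration (editor's addition) of the right- and left-
# padding branches in _pad above, for a 1-D feature (feature_size == 1) and
# padding_value 0.0.
import numpy as np

seq = np.array([0.1, 0.2, 0.3], dtype=np.float32)
right_padded = np.pad(seq, (0, 2), "constant", constant_values=0.0)
left_padded = np.pad(seq, (2, 0), "constant", constant_values=0.0)
print(right_padded)  # [0.1 0.2 0.3 0.  0. ]
print(left_padded)   # [0.  0.  0.1 0.2 0.3]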
"""simple docstring""" import os import pytest from transformers.dynamic_module_utils import get_imports snake_case__ : Optional[Any] = ''' import os ''' snake_case__ : Tuple = ''' def foo(): import os return False ''' snake_case__ : Any = ''' def foo(): def bar(): if True: import os return False return bar() ''' snake_case__ : Any = ''' import os try: import bar except ImportError: raise ValueError() ''' snake_case__ : int = ''' import os def foo(): try: import bar except ImportError: raise ValueError() ''' snake_case__ : Any = ''' import os try: import bar except (ImportError, AttributeError): raise ValueError() ''' snake_case__ : List[str] = ''' import os try: import bar except ImportError as e: raise ValueError() ''' snake_case__ : int = ''' import os try: import bar except: raise ValueError() ''' snake_case__ : List[Any] = ''' import os try: import bar import baz except ImportError: raise ValueError() ''' snake_case__ : Optional[int] = ''' import os try: import bar import baz except ImportError: x = 1 raise ValueError() ''' snake_case__ : Any = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , _snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ): lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' ) with open(_snake_case , '''w''' ) as _tmp_file: _tmp_file.write(_snake_case ) lowerCAmelCase : Tuple = get_imports(_snake_case ) assert parsed_imports == ["os"]
from __future__ import annotations

import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    # Randomly set each cell of the canvas to alive or dead.
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    # Evolve the canvas by one generation.
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
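# Quick sanity check (editor's addition) of the Conway rules implemented in
# __judge_point above: a live cell with two live neighbours survives, while a
# live cell with a single live neighbour dies.
two_neighbours = [[True, True, False], [False, True, False], [False, False, False]]
one_neighbour = [[False, True, False], [False, True, False], [False, False, False]]
print(__judge_point(True, two_neighbours))  # True
print(__judge_point(True, one_neighbour))   # False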
93
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __UpperCAmelCase =logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase__=None , UpperCamelCase__=None ) -> int: return field(default_factory=lambda: default , metadata=UpperCamelCase__ ) @dataclass class a__ : lowerCamelCase : List[str] =list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) lowerCamelCase : List[int] =list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) lowerCamelCase : List[int] =list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) lowerCamelCase : str =field( default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) lowerCamelCase : str =field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) lowerCamelCase : str =field( default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) lowerCamelCase : str =field( default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) lowerCamelCase : str =field( default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) lowerCamelCase : str =field( default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} ) lowerCamelCase : bool =field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" warnings.warn( f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , a , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
67
0
def perfect_cube(n: int) -> bool:
    """simple docstring"""
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
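Worked examples for the check above. Because n ** (1 / 3) goes through floating point, some cubes can misreport — e.g. 64 ** (1 / 3) evaluates to 3.9999999999999996 on many platforms, so perfect_cube(64) can return False:

print(perfect_cube(27))  # True: 3 * 3 * 3 == 27
print(perfect_cube(4))   # False: 4 is not a cube

# a rounding-safe variant (a sketch, not part of the original sample):
def perfect_cube_exact(n: int) -> bool:
    root = round(n ** (1 / 3))
    return root**3 == n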
87
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch UpperCamelCase__ = random.Random() def _UpperCamelCase (a__ :Any , a__ :Union[str, Any]=1.0 , a__ :Tuple=None , a__ :str=None ): """simple docstring""" if rng is None: UpperCamelCase__ = global_rng UpperCamelCase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=400 , __lowerCAmelCase=2000 , __lowerCAmelCase=10 , __lowerCAmelCase=160 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=4000 , __lowerCAmelCase=False , __lowerCAmelCase=True , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = min_seq_length UpperCamelCase__ = max_seq_length UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase__ = padding_value UpperCamelCase__ = sampling_rate UpperCamelCase__ = return_attention_mask UpperCamelCase__ = do_normalize UpperCamelCase__ = feature_size UpperCamelCase__ = chunk_length UpperCamelCase__ = hop_length def _lowerCamelCase ( self ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowerCamelCase ( self , __lowerCAmelCase=False , __lowerCAmelCase=False ): def _flatten(__lowerCAmelCase ): return list(itertools.chain(*__lowerCAmelCase ) ) if equal_length: UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCamelCase__ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): snake_case : int = WhisperFeatureExtractor if is_speech_available() else None def _lowerCamelCase ( self ): UpperCamelCase__ = WhisperFeatureExtractionTester(self ) def _lowerCamelCase ( self ): UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ = feat_extract_first.save_pretrained(__lowerCAmelCase )[0] check_json_file_has_correct_format(__lowerCAmelCase ) UpperCamelCase__ = self.feature_extraction_class.from_pretrained(__lowerCAmelCase ) UpperCamelCase__ = feat_extract_first.to_dict() UpperCamelCase__ = feat_extract_second.to_dict() UpperCamelCase__ = feat_extract_first.mel_filters UpperCamelCase__ = feat_extract_second.mel_filters self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def 
_lowerCamelCase ( self ): UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ = os.path.join(__lowerCAmelCase , """feat_extract.json""" ) feat_extract_first.to_json_file(__lowerCAmelCase ) UpperCamelCase__ = self.feature_extraction_class.from_json_file(__lowerCAmelCase ) UpperCamelCase__ = feat_extract_first.to_dict() UpperCamelCase__ = feat_extract_second.to_dict() UpperCamelCase__ = feat_extract_first.mel_filters UpperCamelCase__ = feat_extract_second.mel_filters self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def _lowerCamelCase ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs] # Test feature size UpperCamelCase__ = feature_extractor(__lowerCAmelCase , padding="""max_length""" , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input UpperCamelCase__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features UpperCamelCase__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) # Test batched UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase__ = np.asarray(__lowerCAmelCase ) UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) # Test truncation required UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs] UpperCamelCase__ = [x[: feature_extractor.n_samples] for x in speech_inputs] UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs_truncated] UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ): self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) def _lowerCamelCase ( self ): import torch UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = np.random.rand(100 , 32 ).astype(np.floataa ) UpperCamelCase__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) UpperCamelCase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _lowerCamelCase ( self , __lowerCAmelCase ): UpperCamelCase__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech UpperCamelCase__ = ds.sort("""id""" ).select(range(__lowerCAmelCase ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def _lowerCamelCase ( self ): # fmt: off UpperCamelCase__ = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on UpperCamelCase__ = self._load_datasamples(1 ) UpperCamelCase__ = WhisperFeatureExtractor() UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __lowerCAmelCase , atol=1E-4 ) ) def _lowerCamelCase ( self ): UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase__ = self._load_datasamples(1 )[0] UpperCamelCase__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue UpperCamelCase__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__lowerCAmelCase )[0] self.assertTrue(np.all(np.mean(__lowerCAmelCase ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(__lowerCAmelCase ) - 1 ) < 1E-3 ) )
87
1
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
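A usage sketch for the helpers above — it assumes a valid OpenWeatherMap key has been pasted into APPID, and each call performs a live HTTP request:

from pprint import pprint

pprint(current_weather("Copenhagen"))            # current conditions
pprint(weather_forecast("Copenhagen").get("city"))  # 5-day / 3-hour forecast metadata
pprint(weather_onecall(lat=55.68, lon=12.57))    # the defaults point at Copenhagen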
274
def prefix_function(input_string: str) -> list:
    """simple docstring"""
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """simple docstring"""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
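A worked example for the Knuth-Morris-Pratt prefix function above (assuming the restored names prefix_function / longest_prefix):

print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(longest_prefix("aabcdaabc"))   # 4 -- "aabc" is both a proper prefix and a suffix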
274
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
366
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
164
0
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        '''simple docstring'''
        return self.__str__()

    def derivative(self) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
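A brief usage sketch for the Polynomial class above (coefficients are stored lowest degree first):

p = Polynomial(2, [1, 0, 3])   # 3x^2 + 1
q = Polynomial(1, [0, 2])      # 2x
print(p.evaluate(2))           # 13 = 3*4 + 1
print((p * q).degree)          # 3
print(p.derivative())          # 6x
print(p.integral(constant=5))  # antiderivative of 3x^2 + 1, integration constant 5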
36
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline __A = "path-to-your-trained-model" __A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda") __A = "A photo of sks dog in a bucket" __A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("dog-bucket.png")
148
0
import string


def decrypt(message: str) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')


def main() -> None:
    """simple docstring"""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
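A usage sketch for the brute-force Caesar decryption above; it prints all 26 candidates, and the reader picks out the legible one:

decrypt("GUVF")  # the line for key 13 reads: Decryption using Key #13: THIS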
329
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : int ): A_ = tempfile.mkdtemp() A_ = BlipImageProcessor() A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer def __A ( self : Optional[Any] , **UpperCAmelCase : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Any ): shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Any ): A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = 
processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
329
1
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """simple docstring"""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 1_0 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_0_4_7,
        1_3_7_3_6_5_3,
        2_5_3_2_6_0_0_1,
        3_2_1_5_0_3_1_7_5_1,
        2_1_5_2_3_0_2_8_9_8_7_4_7,
        3_4_7_4_7_4_9_6_6_0_3_8_3,
        3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
        1,
        3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
        1,
        1,
        3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
        3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
    ]

    primes = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True


def test_miller_rabin() -> None:
    """simple docstring"""
    assert not miller_rabin(5_6_1)
    assert miller_rabin(5_6_3)
    # 2047

    assert not miller_rabin(8_3_8_2_0_1)
    assert miller_rabin(8_3_8_2_0_7)
    # 1_373_653

    assert not miller_rabin(1_7_3_1_6_0_0_1)
    assert miller_rabin(1_7_3_1_6_0_1_7)
    # 25_326_001

    assert not miller_rabin(3_0_7_8_3_8_6_6_4_1)
    assert miller_rabin(3_0_7_8_3_8_6_6_5_3)
    # 3_215_031_751

    assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1)
    assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9)
    # 2_152_302_898_747

    assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7)
    assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7)
    # 3_474_749_660_383

    assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1)
    assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7)
    # 341_550_071_728_321

    assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1)
    assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1)
    # 3_825_123_056_546_413_051

    assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7)
    assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1)
    # 318_665_857_834_031_151_167_461

    assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3)
    assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
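A usage sketch for the deterministic Miller-Rabin test above:

print(miller_rabin(97))   # True -- 97 is prime
print(miller_rabin(561))  # False -- the Carmichael number 561 is correctly rejected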
274
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """simple docstring"""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        """simple docstring"""
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        """simple docstring"""
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """simple docstring"""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(
        self, fwd_node: Node, bwd_node: Node
    ) -> list[TPosition]:
        """simple docstring"""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
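A small usage sketch for the A* classes above, run against the module-level grid (positions are (y, x) tuples; names as restored above):

finder = AStar((0, 0), (2, 3))
print(finder.search())  # a list of (y, x) steps from (0, 0) to (2, 3)

bidir = BidirectionalAStar((0, 0), (2, 3))
print(bidir.search())   # the two frontiers meet and the joined path is returned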
274
1
"""simple docstring""" def _lowercase ( __lowerCAmelCase ) -> set: SCREAMING_SNAKE_CASE__ : Union[str, Any] = set() # edges = list of graph's edges SCREAMING_SNAKE_CASE__ : List[str] = get_edges(__lowerCAmelCase ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: SCREAMING_SNAKE_CASE__ : List[str] = edges.pop() chosen_vertices.add(__lowerCAmelCase ) chosen_vertices.add(__lowerCAmelCase ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(__lowerCAmelCase ) return chosen_vertices def _lowercase ( __lowerCAmelCase ) -> set: SCREAMING_SNAKE_CASE__ : Tuple = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
369
"""simple docstring""" from math import loga def _lowercase ( __lowerCAmelCase ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Input value must be a 'int' type""" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
56
0
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
346
'''simple docstring'''
from math import factorial


def solution(n: int = 20) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(2_0))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
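A worked example for the closed form above: solution(n) evaluates the central binomial coefficient C(2n, n), which counts the lattice paths through an n x n grid:

print(solution(2))   # 6 -- the six routes through a 2x2 grid
print(solution(20))  # 137846528820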
346
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
157
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _SCREAMING_SNAKE_CASE : str = False class __a ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class __a ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Optional[int] ): UpperCamelCase__ : Any =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCamelCase__ : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCamelCase__ : Dict =torch.manual_seed(0 ) UpperCamelCase__ : Optional[int] =pipe.dual_guided( prompt='''first prompt''' , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase_ ) UpperCamelCase__ : str =VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCamelCase__ : int =generator.manual_seed(0 ) UpperCamelCase__ : str =pipe.dual_guided( prompt='''first prompt''' , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def _lowerCAmelCase ( self : Optional[Any] ): UpperCamelCase__ : Dict =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCamelCase__ : str ='''cyberpunk 2077''' UpperCamelCase__ : str =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCamelCase__ : int =torch.manual_seed(0 ) UpperCamelCase__ : int =pipe.dual_guided( prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCamelCase__ : List[str] =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase__ : Dict =np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ : Dict ='''A painting of a squirrel eating a burger ''' UpperCamelCase__ : Optional[int] =torch.manual_seed(0 ) UpperCamelCase__ : str =pipe.text_to_image( prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCamelCase__ : str =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase__ : List[Any] =np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase__ : Optional[Any] =pipe.image_variation(lowercase_ , 
generator=lowercase_ , output_type='''numpy''' ).images UpperCamelCase__ : str =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase__ : Tuple =np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
157
1
'''simple docstring''' import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient __lowerCAmelCase = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN''']) def __lowerCamelCase ( lowerCAmelCase_ ) -> Tuple: _a : int = test_results.split(' ' ) _a : Optional[Any] = 0 _a : Optional[Any] = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. _a : Union[str, Any] = expressions[-2] if '=' in expressions[-1] else expressions[-1] for i, expression in enumerate(lowerCAmelCase_ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def __lowerCamelCase ( lowerCAmelCase_ ) -> str: _a : List[str] = {} _a : str = None _a : List[str] = False for line in failures_short_lines.split('\n' ): if re.search(r'_ \[doctest\]' , lowerCAmelCase_ ): _a : int = True _a : Any = line.split(' ' )[2] elif in_error and not line.split(' ' )[0].isdigit(): _a : Any = line _a : Union[str, Any] = False return failures class __magic_name__ : def __init__( self : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Dict ): _a : str = title _a : Tuple = doc_test_results['time_spent'].split(',' )[0] _a : List[Any] = doc_test_results['success'] _a : Dict = doc_test_results['failures'] _a : Optional[Any] = self.n_success + self.n_failures # Failures and success of the modeling tests _a : Union[str, Any] = doc_test_results @property def __lowercase ( self : Tuple ): _a : Union[str, Any] = [self._time_spent] _a : List[str] = 0 for time in time_spent: _a : Optional[Any] = time.split(':' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(_UpperCAmelCase ) == 1: _a : Union[str, Any] = [0, 0, time_parts[0]] _a , _a , _a : Any = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds _a , _a , _a : Any = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F"""{int(_UpperCAmelCase )}h{int(_UpperCAmelCase )}m{int(_UpperCAmelCase )}s""" @property def __lowercase ( self : int ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def __lowercase ( self : Optional[int] ): return { "type": "section", "text": { "type": "plain_text", "text": F"""🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.""", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } @property def __lowercase ( self : Tuple ): return { "type": "section", "text": { "type": "plain_text", "text": ( F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in""" F""" {self.time}.""" ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } @property def __lowercase ( self : Optional[int] ): _a : List[str] = 40 _a : Optional[int] = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(_UpperCAmelCase ,_UpperCAmelCase )} _a : Optional[int] = '' for category, failures in category_failures.items(): if len(_UpperCAmelCase ) == 0: continue if report != "": report += "\n\n" report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(_UpperCAmelCase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"""The following examples had failures:\n\n\n{report}\n""", }, } @property def __lowercase ( self : List[Any] ): _a : List[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(_UpperCAmelCase ) @staticmethod def __lowercase ( ): _a : Union[str, Any] = [ { 'type': 'section', 'text': { 'type': 'plain_text', 'text': 'There was an issue running the tests.', }, 'accessory': { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } ] print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(_UpperCAmelCase )} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text='There was an issue running the tests.' ,blocks=_UpperCAmelCase ,) def __lowercase ( self : str ): print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(self.payload )} ) ) _a : List[Any] = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else 'All tests passed.' 
_a : Dict = client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,blocks=self.payload ,text=_UpperCAmelCase ,) def __lowercase ( self : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Tuple ): _a : Union[str, Any] = '' for key, value in failures.items(): _a : Optional[Any] = value[:200] + ' [Truncated]' if len(_UpperCAmelCase ) > 250 else value failures_text += F"""*{key}*\n_{value}_\n\n""" _a : Tuple = job_name _a : Union[str, Any] = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}} if job_link is not None: _a : Optional[int] = { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def __lowercase ( self : List[Any] ): if self.thread_ts is None: raise ValueError('Can only post reply if a post has been made.' ) _a : str = self.doc_test_results.pop('job_link' ) self.doc_test_results.pop('failures' ) self.doc_test_results.pop('success' ) self.doc_test_results.pop('time_spent' ) _a : Tuple = sorted(self.doc_test_results.items() ,key=lambda _UpperCAmelCase : t[0] ) for job, job_result in sorted_dict: if len(job_result['failures'] ): _a : Optional[int] = F"""*Num failures* :{len(job_result['failed'] )} \n""" _a : List[str] = job_result['failures'] _a : str = self.get_reply_blocks(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,text=_UpperCAmelCase ) print('Sending the following reply' ) print(json.dumps({'blocks': blocks} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text=F"""Results for {job}""" ,blocks=_UpperCAmelCase ,thread_ts=self.thread_ts['ts'] ,) time.sleep(1 ) def __lowerCamelCase ( ) -> Optional[Any]: _a : Optional[int] = os.environ['GITHUB_RUN_ID'] _a : Any = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100""" _a : Union[str, Any] = requests.get(lowerCAmelCase_ ).json() _a : List[Any] = {} try: jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) _a : int = math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowerCAmelCase_ ): _a : List[Any] = requests.get(url + f"""&page={i + 2}""" ).json() jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) return jobs except Exception as e: print('Unknown error, could not fetch links.' 
, lowerCAmelCase_ ) return {} def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[int]: _a : Optional[Any] = {} if os.path.exists(lowerCAmelCase_ ): _a : Dict = os.listdir(lowerCAmelCase_ ) for file in files: try: with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , encoding='utf-8' ) as f: _a : List[str] = f.read() except UnicodeDecodeError as e: raise ValueError(f"""Could not open {os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )}.""" ) from e return _artifact def __lowerCamelCase ( ) -> Optional[int]: class __magic_name__ : def __init__( self : Tuple ,_UpperCAmelCase : str ): _a : str = name _a : Union[str, Any] = [] def __str__( self : Optional[Any] ): return self.name def __lowercase ( self : Optional[int] ,_UpperCAmelCase : str ): self.paths.append({'name': self.name, 'path': path} ) _a : Dict[str, Artifact] = {} _a : Any = filter(os.path.isdir , os.listdir() ) for directory in directories: _a : Optional[int] = directory if artifact_name not in _available_artifacts: _a : Optional[Any] = Artifact(lowerCAmelCase_ ) _available_artifacts[artifact_name].add_path(lowerCAmelCase_ ) return _available_artifacts if __name__ == "__main__": __lowerCAmelCase = get_job_links() __lowerCAmelCase = retrieve_available_artifacts() __lowerCAmelCase = collections.OrderedDict( [ ('''*.py''', '''API Examples'''), ('''*.md''', '''MD Examples'''), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' __lowerCAmelCase = { v: { '''failed''': [], '''failures''': {}, } for v in docs.values() } # Link to the GitHub Action job __lowerCAmelCase = github_actions_job_links.get('''run_doctests''') __lowerCAmelCase = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0] __lowerCAmelCase = retrieve_artifact(artifact_path['''name''']) if "stats" in artifact: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = handle_test_results(artifact['''stats''']) __lowerCAmelCase = failed __lowerCAmelCase = success __lowerCAmelCase = time_spent[1:-1] + ''', ''' __lowerCAmelCase = extract_first_line_failure(artifact['''failures_short''']) for line in artifact["summary_short"].split('''\n'''): if re.search('''FAILED''', line): __lowerCAmelCase = line.replace('''FAILED ''', '''''') __lowerCAmelCase = line.split()[0].replace('''\n''', '''''') if "::" in line: __lowerCAmelCase , __lowerCAmelCase = line.split('''::''') else: __lowerCAmelCase , __lowerCAmelCase = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): __lowerCAmelCase = docs[file_regex] doc_test_results[category]["failed"].append(test) __lowerCAmelCase = all_failures[test] if test in all_failures else '''N/A''' __lowerCAmelCase = failure break __lowerCAmelCase = Message('''🤗 Results of the doc tests.''', doc_test_results) message.post() message.post_reply()
89
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple: _a : Any = [] for old_item in old_list: _a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' ) _a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' ) _a : str = new_item.replace('out_layers.0' , 'norm2' ) _a : List[str] = new_item.replace('out_layers.3' , 'conv2' ) _a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' ) _a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' ) _a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any: _a : List[str] = [] for old_item in old_list: _a : List[Any] = old_item _a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' ) _a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' ) _a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) _a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) _a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _a : Optional[Any] = old_checkpoint[path] _a : Optional[Any] = old_tensor.shape[0] // 3 _a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _a : int = old_tensor.shape[0] // config['num_head_channels'] // 3 _a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 ) _a : Dict = query.reshape(lowerCAmelCase_ ) _a : str = key.reshape(lowerCAmelCase_ ) _a : Optional[int] = value.reshape(lowerCAmelCase_ ) for path in paths: _a : Dict = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) _a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) _a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: _a : int = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _a : List[str] = old_checkpoint[path['old']][:, :, 0] else: _a : Dict = old_checkpoint[path['old']] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _a : Optional[int] = {} _a : Dict = checkpoint['time_embed.0.weight'] _a : Tuple = checkpoint['time_embed.0.bias'] _a : Union[str, Any] = checkpoint['time_embed.2.weight'] _a : List[str] = checkpoint['time_embed.2.bias'] _a : List[str] = checkpoint['input_blocks.0.0.weight'] _a : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _a : Optional[int] = checkpoint['out.0.weight'] _a : int = checkpoint['out.0.bias'] _a : List[str] = checkpoint['out.2.weight'] _a : Optional[int] = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) _a : Dict = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the middle blocks only _a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) _a : Union[str, Any] = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the output blocks only _a : Optional[int] = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) _a : str = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } for i in range(1 , lowerCAmelCase_ ): _a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1) _a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1) _a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: _a : List[Any] = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] _a : Union[str, Any] = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue _a : Any = renew_resnet_paths(lowerCAmelCase_ ) _a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} _a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ ) if len(lowerCAmelCase_ ): _a : List[str] = renew_attention_paths(lowerCAmelCase_ ) _a : List[Any] = { 'old': f"""input_blocks.{i}.1""", 'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : Optional[Any] = { f"""input_blocks.{i}.1.qkv.bias""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , ) _a : str = middle_blocks[0] _a : Tuple = middle_blocks[1] _a : Any = middle_blocks[2] _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : Any = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : int = renew_attention_paths(lowerCAmelCase_ ) _a : int = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ): _a : List[str] = i // (config['num_res_blocks'] + 1) _a : Any = i % (config['num_res_blocks'] + 1) _a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]] _a : Optional[Any] = {} for layer in output_block_layers: _a , _a : str = layer.split('.' 
)[0], shave_segments(lowerCAmelCase_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(lowerCAmelCase_ ) else: _a : str = [layer_name] if len(lowerCAmelCase_ ) > 1: _a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] _a : Dict = renew_resnet_paths(lowerCAmelCase_ ) _a : str = renew_resnet_paths(lowerCAmelCase_ ) _a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) _a : Tuple = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] _a : List[str] = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. if len(lowerCAmelCase_ ) == 2: _a : Union[str, Any] = [] if len(lowerCAmelCase_ ): _a : Tuple = renew_attention_paths(lowerCAmelCase_ ) _a : str = { 'old': f"""output_blocks.{i}.1""", 'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : List[Any] = { f"""output_blocks.{i}.1.qkv.bias""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , ) else: _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] ) _a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] ) _a : Union[str, Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __lowerCAmelCase = json.loads(f.read()) __lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __lowerCAmelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) 
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
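# A minimal sketch of how this converter is typically invoked; the script name
# and the paths below are hypothetical placeholders, not files shipped with it:
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm_model/model.pt \
#       --config_file ./ldm_model/config.json \
#       --dump_path ./converted_ldm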
"""simple docstring""" from __future__ import annotations a = 10 def _snake_case ( _snake_case : list[int] ) -> list[int]: '''simple docstring''' _A = 1 _A = max(_snake_case ) while placement <= max_digit: # declare and initialize empty buckets _A = [[] for _ in range(_snake_case )] # split list_of_ints between the buckets for i in list_of_ints: _A = int((i / placement) % RADIX ) buckets[tmp].append(_snake_case ) # put each buckets' contents into list_of_ints _A = 0 for b in range(_snake_case ): for i in buckets[b]: _A = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse from collections import defaultdict import yaml a = '''docs/source/en/_toctree.yml''' def _snake_case ( _snake_case : List[Any] ) -> Optional[Any]: '''simple docstring''' _A = defaultdict(_snake_case ) _A = [] _A = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'local': doc['local'], 'title': doc['title']} ) else: new_doc_list.append(_snake_case ) _A = new_doc_list _A = [key for key, value in counts.items() if value > 1] _A = [] for duplicate_key in duplicates: _A = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} ) if len(_snake_case ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] ) _A = sorted(_snake_case , key=lambda _snake_case : s["title"].lower() ) # "overview" gets special treatment and is always first if len(_snake_case ) > 1: raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' ) overview_doc.extend(_snake_case ) # Sort return overview_doc def _snake_case ( _snake_case : Tuple=False ) -> List[Any]: '''simple docstring''' with open(_snake_case , encoding='utf-8' ) as f: _A = yaml.safe_load(f.read() ) # Get to the API doc _A = 0 while content[api_idx]["title"] != "API": api_idx += 1 _A = content[api_idx]['sections'] # Then to the model doc _A = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _A = api_doc[scheduler_idx]['sections'] _A = clean_doc_toc(_snake_case ) _A = False if new_scheduler_doc != scheduler_doc: _A = True if overwrite: _A = new_scheduler_doc if diff: if overwrite: _A = api_doc with open(_snake_case , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(_snake_case , allow_unicode=_snake_case ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) def _snake_case ( _snake_case : str=False ) -> Union[str, Any]: '''simple docstring''' with open(_snake_case , encoding='utf-8' ) as f: _A = yaml.safe_load(f.read() ) # Get to the API doc _A = 0 while content[api_idx]["title"] != "API": api_idx += 1 _A = content[api_idx]['sections'] # Then to the model doc _A = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _A = False _A = api_doc[pipeline_idx]['sections'] _A = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _A = pipeline_doc['section'] _A = clean_doc_toc(_snake_case ) if overwrite: _A = new_sub_pipeline_doc new_pipeline_docs.append(_snake_case ) # sort overall pipeline doc _A = clean_doc_toc(_snake_case ) if new_pipeline_docs != pipeline_docs: _A = True if overwrite: _A = new_pipeline_docs if diff: if overwrite: _A = api_doc with open(_snake_case , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(_snake_case , allow_unicode=_snake_case ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' 
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
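# Hypothetical invocation of this table-of-contents checker (in diffusers such
# a checker conventionally lives at utils/check_doc_toc.py; the path is an
# assumption):
#
#   python utils/check_doc_toc.py --fix_and_overwrite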
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density function at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
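# Quick numerical check: at x == mu the standard normal density equals
# 1 / sqrt(2 * pi), roughly 0.3989.
#
#   >>> round(float(gaussian(0.0)), 4)
#   0.3989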
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url, root=".") -> bytes:
    # root defaults to the working directory here; the call site below passes
    # only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # OpenAI encodes the expected SHA256 checksum in the URL itself
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1_024
        ) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes, so deserialize them through a buffer
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # n_text_head (not n_text_state) holds the decoder head count
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
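# Hypothetical invocation: --checkpoint_path may be a key of the _MODELS table
# above or a local ``.pt`` file; the script name and output path are
# placeholders:
#
#   python convert_openai_whisper_to_hf.py \
#       --checkpoint_path tiny \
#       --pytorch_dump_folder_path ./whisper-tiny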
def bin_to_octal(bin_string: str) -> str:
    """Convert a string of binary digits to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad with zeros so the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # each group of 3 bits maps to one octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
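# Illustrative check: binary 101101 groups as 101|101, i.e. octal 55.
#
#   >>> bin_to_octal("101101")
#   '55'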
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE_ = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class lowercase__ ( lowercase ): lowercase__ = 42 @flax_register_to_config class lowercase__ ( nn.Module , lowercase , lowercase ): lowercase__ = 32 lowercase__ = 4 lowercase__ = 4 lowercase__ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) lowercase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") lowercase__ = False lowercase__ = (3_20, 6_40, 12_80, 12_80) lowercase__ = 2 lowercase__ = 8 lowercase__ = None lowercase__ = 12_80 lowercase__ = 0.0 lowercase__ = False lowercase__ = jnp.floataa lowercase__ = True lowercase__ = 0 lowercase__ = False def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : jax.random.KeyArray ): '''simple docstring''' # init input tensors _UpperCamelCase : Any = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCamelCase : Dict = jnp.zeros(lowerCamelCase__ ,dtype=jnp.floataa ) _UpperCamelCase : str = jnp.ones((1,) ,dtype=jnp.intaa ) _UpperCamelCase : str = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) _UpperCamelCase , _UpperCamelCase : Dict = jax.random.split(lowerCamelCase__ ) _UpperCamelCase : Optional[Any] = {'params': params_rng, 'dropout': dropout_rng} return self.init(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )["params"] def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _UpperCamelCase : Optional[int] = self.block_out_channels _UpperCamelCase : Union[str, Any] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( 'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_UpperCamelCase : Tuple = self.num_attention_heads or self.attention_head_dim # input _UpperCamelCase : List[Any] = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time _UpperCamelCase : Optional[int] = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) _UpperCamelCase : int = FlaxTimestepEmbedding(lowerCamelCase__ ,dtype=self.dtype ) _UpperCamelCase : Tuple = self.only_cross_attention if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Any = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Optional[int] = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCamelCase : str = [] _UpperCamelCase : Tuple = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCamelCase : Dict = output_channel _UpperCamelCase : Dict = block_out_channels[i] _UpperCamelCase : Optional[Any] = i == len(lowerCamelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCamelCase : Tuple = FlaxCrossAttnDownBlockaD( in_channels=lowerCamelCase__ ,out_channels=lowerCamelCase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) else: _UpperCamelCase : Union[str, Any] = FlaxDownBlockaD( in_channels=lowerCamelCase__ ,out_channels=lowerCamelCase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(lowerCamelCase__ ) _UpperCamelCase : Any = down_blocks # mid _UpperCamelCase : str = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) # up _UpperCamelCase : Tuple = [] _UpperCamelCase : Union[str, Any] = list(reversed(lowerCamelCase__ ) ) _UpperCamelCase : Optional[Any] = list(reversed(lowerCamelCase__ ) ) _UpperCamelCase : Dict = list(reversed(lowerCamelCase__ ) ) _UpperCamelCase : Union[str, Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCamelCase : Dict = output_channel _UpperCamelCase : Any = reversed_block_out_channels[i] _UpperCamelCase : int = reversed_block_out_channels[min(i + 1 ,len(lowerCamelCase__ ) - 1 )] _UpperCamelCase : Optional[int] = i == len(lowerCamelCase__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCamelCase : Tuple = FlaxCrossAttnUpBlockaD( in_channels=lowerCamelCase__ ,out_channels=lowerCamelCase__ ,prev_output_channel=lowerCamelCase__ ,num_layers=self.layers_per_block + 1 ,num_attention_heads=reversed_num_attention_heads[i] ,add_upsample=not is_final_block ,dropout=self.dropout ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) else: _UpperCamelCase : List[str] = FlaxUpBlockaD( in_channels=lowerCamelCase__ ,out_channels=lowerCamelCase__ ,prev_output_channel=lowerCamelCase__ ,num_layers=self.layers_per_block + 1 ,add_upsample=not is_final_block ,dropout=self.dropout 
,dtype=self.dtype ,) up_blocks.append(lowerCamelCase__ ) _UpperCamelCase : str = output_channel _UpperCamelCase : Dict = up_blocks # out _UpperCamelCase : Tuple = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) _UpperCamelCase : str = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : Dict ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = False ,): '''simple docstring''' # 1. time if not isinstance(lowerCamelCase__ ,jnp.ndarray ): _UpperCamelCase : Any = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(lowerCamelCase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCamelCase : Dict = timesteps.astype(dtype=jnp.floataa ) _UpperCamelCase : Optional[int] = jnp.expand_dims(lowerCamelCase__ ,0 ) _UpperCamelCase : List[Any] = self.time_proj(lowerCamelCase__ ) _UpperCamelCase : Tuple = self.time_embedding(lowerCamelCase__ ) # 2. pre-process _UpperCamelCase : List[str] = jnp.transpose(lowerCamelCase__ ,(0, 2, 3, 1) ) _UpperCamelCase : str = self.conv_in(lowerCamelCase__ ) # 3. down _UpperCamelCase : Tuple = (sample,) for down_block in self.down_blocks: if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase , _UpperCamelCase : List[str] = down_block(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,deterministic=not train ) else: _UpperCamelCase , _UpperCamelCase : List[Any] = down_block(lowerCamelCase__ ,lowerCamelCase__ ,deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCamelCase : Optional[int] = () for down_block_res_sample, down_block_additional_residual in zip( lowerCamelCase__ ,lowerCamelCase__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCamelCase : int = new_down_block_res_samples # 4. mid _UpperCamelCase : int = self.mid_block(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCamelCase : List[str] = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCamelCase : int = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Tuple = up_block( lowerCamelCase__ ,temb=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,res_hidden_states_tuple=lowerCamelCase__ ,deterministic=not train ,) else: _UpperCamelCase : str = up_block(lowerCamelCase__ ,temb=lowerCamelCase__ ,res_hidden_states_tuple=lowerCamelCase__ ,deterministic=not train ) # 6. post-process _UpperCamelCase : Optional[int] = self.conv_norm_out(lowerCamelCase__ ) _UpperCamelCase : Tuple = nn.silu(lowerCamelCase__ ) _UpperCamelCase : int = self.conv_out(lowerCamelCase__ ) _UpperCamelCase : Any = jnp.transpose(lowerCamelCase__ ,(0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowerCamelCase__ )
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _lowercase ( lowerCAmelCase ): """simple docstring""" __A = 42 __A = None def a( A : Optional[Any] , A : Any=0.999 , A : Dict="cosine" , ) -> Optional[int]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(A : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A : Any ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) a = [] for i in range(A ): a = i / num_diffusion_timesteps a = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A ) / alpha_bar_fn(A ) , A ) ) return torch.tensor(A , dtype=torch.floataa ) class _lowercase ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" __A = 1 @register_to_config def __init__(self , lowerCamelCase_ = 1000 , lowerCamelCase_ = 0.0001 , lowerCamelCase_ = 0.02 , lowerCamelCase_ = "linear" , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = True , lowerCamelCase_ = 0 , lowerCamelCase_ = "epsilon" , lowerCamelCase_ = 1.0 , **lowerCamelCase_ , ): """simple docstring""" if kwargs.get("set_alpha_to_one" , lowerCamelCase_ ) is not None: a = ( "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead." ) deprecate("set_alpha_to_one" , "1.0.0" , lowerCamelCase_ , standard_warn=lowerCamelCase_ ) a = kwargs["set_alpha_to_one"] if trained_betas is not None: a = torch.tensor(lowerCamelCase_ , dtype=torch.floataa ) elif beta_schedule == "linear": a = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. a = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule a = betas_for_alpha_bar(lowerCamelCase_ ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) a = 1.0 - self.betas a = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
a = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution a = 1.0 # setable values a = None a = torch.from_numpy(np.arange(0 , lowerCamelCase_ ).copy().astype(np.intaa ) ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ): """simple docstring""" return sample def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ): """simple docstring""" if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) a = num_inference_steps a = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 a = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round().copy().astype(np.intaa ) a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ ) self.timesteps += self.config.steps_offset def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0.0 , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , ): """simple docstring""" a = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process a = self.alphas_cumprod[timestep] a = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) a = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 a = model_output elif self.config.prediction_type == "sample": a = model_output a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": a = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output a = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: a = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ ) def __len__(self ): """simple docstring""" return self.config.num_train_timesteps
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __snake_case = 16 __snake_case = 32 def a ( __a , __a = 16 , __a = "bert-base-cased" ) -> Any: '''simple docstring''' UpperCamelCase__ :List[str] = AutoTokenizer.from_pretrained(__a ) UpperCamelCase__ :List[Any] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase__ :Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCamelCase__ :Optional[int] = datasets.map( __a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__a ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase__ :Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. UpperCamelCase__ :Dict = DataLoader( tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) UpperCamelCase__ :str = DataLoader( tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader def a ( __a , __a , __a , __a ) -> str: '''simple docstring''' model.eval() UpperCamelCase__ :List[str] = 0 for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase__ :int = model(**__a ) UpperCamelCase__ :Tuple = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCamelCase__ , UpperCamelCase__ :int = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__a ) - 1: UpperCamelCase__ :Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCamelCase__ :List[str] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__a , references=__a , ) UpperCamelCase__ :Union[str, Any] = metric.compute() return eval_metric["accuracy"] def a ( __a , __a ) -> List[Any]: '''simple docstring''' UpperCamelCase__ :Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase__ :Any = config['''lr'''] UpperCamelCase__ :Optional[int] = int(config['''num_epochs'''] ) UpperCamelCase__ :List[Any] = int(config['''seed'''] ) UpperCamelCase__ :List[Any] = int(config['''batch_size'''] ) UpperCamelCase__ :List[Any] = args.model_name_or_path set_seed(__a ) UpperCamelCase__ , UpperCamelCase__ :Any = get_dataloaders(__a , __a , __a ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase__ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a ) # Instantiate optimizer UpperCamelCase__ :Any = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCamelCase__ :Optional[Any] = optimizer_cls(params=model.parameters() , lr=__a ) if accelerator.state.deepspeed_plugin is not None: UpperCamelCase__ :Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCamelCase__ :Dict = 1 UpperCamelCase__ :Tuple = (len(__a ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCamelCase__ :Any = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , ) else: UpperCamelCase__ :Any = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = accelerator.prepare( __a , __a , __a , __a , __a ) # We need to keep track of how many total steps we have iterated over UpperCamelCase__ :Tuple = 0 # We also need to keep track of the stating epoch so files are named properly UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Optional[int] = evaluate.load('''glue''' , '''mrpc''' ) UpperCamelCase__ :List[Any] = num_epochs if args.partial_train_epoch is not None: UpperCamelCase__ :Optional[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCamelCase__ :Dict = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCamelCase__ :Tuple = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCamelCase__ :Any = int(__a ) + 1 UpperCamelCase__ :Dict = evaluation_loop(__a , __a , __a , __a ) accelerator.print('''resumed checkpoint performance:''' , __a ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f: UpperCamelCase__ :Optional[int] = json.load(__a ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCamelCase__ :Optional[Any] = {} for epoch in range(__a , __a ): model.train() for step, batch in enumerate(__a ): UpperCamelCase__ :Optional[int] = model(**__a ) UpperCamelCase__ :Optional[int] = outputs.loss UpperCamelCase__ :str = loss / gradient_accumulation_steps accelerator.backward(__a ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCamelCase__ :Union[str, Any] = f'''epoch_{epoch}''' UpperCamelCase__ :List[Any] = os.path.join(args.output_dir , __a ) accelerator.save_state(__a ) UpperCamelCase__ :List[Any] = evaluation_loop(__a , __a , __a , __a ) UpperCamelCase__ :int = accuracy UpperCamelCase__ :List[Any] = lr_scheduler.get_lr()[0] UpperCamelCase__ :Any = optimizer.param_groups[0]['''lr'''] UpperCamelCase__ :int = epoch UpperCamelCase__ :Tuple = overall_step accelerator.print(f'''epoch {epoch}:''' , __a ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f: json.dump(__a , __a ) def a ( ) -> Tuple: '''simple docstring''' UpperCamelCase__ :List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=__a , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__a , ) parser.add_argument( '''--output_dir''' , type=__a , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=__a , default=__a , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=__a , default=__a , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=__a , default=2 , help='''Number of train epochs.''' , ) UpperCamelCase__ :Optional[int] = parser.parse_args() UpperCamelCase__ :List[str] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(__a , __a ) if __name__ == "__main__": main()
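# Hypothetical launch command for this checkpointing example; the script name
# and output directory are placeholders, while the flags match the argparse
# setup above:
#
#   accelerate launch checkpointing_example.py \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./mrpc_checkpoints \
#       --num_epochs 2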
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase : str = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] __lowercase : Tuple = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] __lowercase : Dict = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): __lowercase : Optional[Any] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __lowercase ( unittest.TestCase ): def UpperCAmelCase__ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase__ (self ): lowerCamelCase_ : Tuple = 1 lowerCamelCase_ : str = 3 lowerCamelCase_ : Dict = (3_2, 3_2) lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A ) return image @property def UpperCAmelCase__ (self ): torch.manual_seed(0 ) lowerCamelCase_ : Optional[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) return model @property def UpperCAmelCase__ (self ): torch.manual_seed(0 ) lowerCamelCase_ : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def UpperCAmelCase__ (self ): torch.manual_seed(0 ) lowerCamelCase_ : Any = RobertaSeriesConfig( hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , ) return RobertaSeriesModelWithTransformation(A ) @property def UpperCAmelCase__ (self ): def extract(*A , **A ): class __lowercase : def __init__(self ): lowerCamelCase_ : Any = torch.ones([0] ) def UpperCAmelCase__ (self , A ): self.pixel_values.to(A ) return self return Out() return extract def UpperCAmelCase__ (self ): lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ : List[Any] = self.dummy_cond_unet lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A ) lowerCamelCase_ : Union[str, Any] = self.dummy_vae lowerCamelCase_ : List[Any] = self.dummy_text_encoder lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCamelCase_ : Dict = 7_7 lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A ) lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline( unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , ) lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A ) lowerCamelCase_ : int = alt_pipe.to(A ) alt_pipe.set_progress_bar_config(disable=A ) lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger''' lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 ) 
lowerCamelCase_ : Optional[Any] = alt_pipe( [prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , ) lowerCamelCase_ : int = output.images lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 ) lowerCamelCase_ : Union[str, Any] = alt_pipe( [prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0] lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1] lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Dict = self.dummy_cond_unet lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A ) lowerCamelCase_ : List[Any] = self.dummy_vae lowerCamelCase_ : Dict = self.dummy_text_encoder lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCamelCase_ : Optional[Any] = 7_7 lowerCamelCase_ : str = self.dummy_image.to(A ) # put models in fp16 lowerCamelCase_ : Optional[int] = unet.half() lowerCamelCase_ : Dict = vae.half() lowerCamelCase_ : Union[str, Any] = bert.half() # make sure here that pndm scheduler skips prk lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline( unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , ) lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A ) lowerCamelCase_ : Any = alt_pipe.to(A ) alt_pipe.set_progress_bar_config(disable=A ) lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger''' lowerCamelCase_ : str = torch.manual_seed(0 ) lowerCamelCase_ : Optional[int] = alt_pipe( [prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images assert image.shape == (1, 3_2, 3_2, 3) @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) ) lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion''' lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained( A , safety_checker=A , ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation''' lowerCamelCase_ : Any = torch.manual_seed(0 ) lowerCamelCase_ : Optional[Any] = pipe( prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , ) lowerCamelCase_ : Dict = output.images[0] lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 7_6_0, 3) lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def UpperCAmelCase__ (self 
): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ (self ): lowerCamelCase_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) ) lowerCamelCase_ : str = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' ) lowerCamelCase_ : int = '''BAAI/AltDiffusion''' lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained( A , safety_checker=A , ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation''' lowerCamelCase_ : List[Any] = torch.manual_seed(0 ) lowerCamelCase_ : Dict = pipe( prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , ) lowerCamelCase_ : List[str] = output.images[0] assert image.shape == (5_1_2, 7_6_8, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
318
0
"""simple docstring""" from __future__ import annotations def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[tuple[int, int]]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :str = position lowerCAmelCase__ :Optional[Any] = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] lowerCAmelCase__ :Union[str, Any] = [] for position in positions: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_SCREAMING_SNAKE_CASE ) return permissible_positions def __A (_SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return not any(elem == 0 for row in board for elem in row ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" if is_complete(_SCREAMING_SNAKE_CASE ): return True for position in get_valid_pos(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ , lowerCAmelCase__ :List[str] = position if board[y][x] == 0: lowerCAmelCase__ :str = curr + 1 if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , curr + 1 ): return True lowerCAmelCase__ :Optional[Any] = 0 return False def __A (_SCREAMING_SNAKE_CASE ) ->list[list[int]]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = [[0 for i in range(_SCREAMING_SNAKE_CASE )] for j in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Tuple = 1 if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , (i, j) , 1 ): return board lowerCAmelCase__ :Dict = 0 lowerCAmelCase__ :Any = F"Open Kight Tour cannot be performed on a board of size {n}" raise ValueError(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
254
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1e-12 ) ->str: """simple docstring""" lowerCAmelCase__ :Tuple = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T lowerCAmelCase__ :int = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T return jnp.matmul(_SCREAMING_SNAKE_CASE , norm_emb_a.T ) class _lowerCAmelCase ( nn.Module ): """simple docstring""" __magic_name__ :CLIPConfig __magic_name__ :jnp.dtype = jnp.floataa def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config ) lowerCAmelCase__ :str = nn.Dense(self.config.projection_dim , use_bias=__UpperCAmelCase , dtype=self.dtype ) lowerCAmelCase__ :Optional[Any] = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) lowerCAmelCase__ :Optional[int] = self.param( 'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) ) lowerCAmelCase__ :Any = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) ) lowerCAmelCase__ :List[Any] = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) ) def __call__( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.vision_model(__UpperCAmelCase )[1] lowerCAmelCase__ :Optional[int] = self.visual_projection(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = jax_cosine_distance(__UpperCAmelCase , self.special_care_embeds ) lowerCAmelCase__ :Tuple = jax_cosine_distance(__UpperCAmelCase , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs lowerCAmelCase__ :Dict = 0.0 lowerCAmelCase__ :List[str] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment lowerCAmelCase__ :Optional[Any] = jnp.round(__UpperCAmelCase , 3 ) lowerCAmelCase__ :Tuple = jnp.any(special_scores > 0 , axis=1 , keepdims=__UpperCAmelCase ) # Use a lower threshold if an image has any special care concept lowerCAmelCase__ :List[Any] = is_special_care * 0.01 lowerCAmelCase__ :Union[str, Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment lowerCAmelCase__ :Any = jnp.round(__UpperCAmelCase , 3 ) lowerCAmelCase__ :Tuple = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Tuple = CLIPConfig __magic_name__ :Tuple = """clip_input""" __magic_name__ :str = FlaxStableDiffusionSafetyCheckerModule def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = jnp.floataa , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' if input_shape is None: lowerCAmelCase__ :Dict = (1, 2_2_4, 2_2_4, 3) lowerCAmelCase__ :Any = self.module_class(config=__UpperCAmelCase , dtype=__UpperCAmelCase , **__UpperCAmelCase ) super().__init__(__UpperCAmelCase , __UpperCAmelCase , input_shape=__UpperCAmelCase , seed=__UpperCAmelCase , dtype=__UpperCAmelCase , _do_init=_do_init ) def snake_case ( self , __UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :str = jax.random.normal(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = jax.random.split(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = {'params': params_rng, 'dropout': dropout_rng} lowerCAmelCase__ :Optional[int] = self.module.init(__UpperCAmelCase , __UpperCAmelCase )['params'] return random_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = jnp.transpose(__UpperCAmelCase , (0, 2, 3, 1) ) return self.module.apply( {'params': params or self.params} , jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) , rngs={} , )
254
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { 'configuration_nllb_moe': [ 'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NllbMoeConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST', 'NllbMoeForConditionalGeneration', 'NllbMoeModel', 'NllbMoePreTrainedModel', 'NllbMoeTop2Router', 'NllbMoeSparseMLP', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
180
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class a ( __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Any = '''resnet''' lowerCamelCase :Any = ['''basic''', '''bottleneck'''] def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=64 , lowerCAmelCase_=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_=[3, 4, 6, 3] , lowerCAmelCase_="bottleneck" , lowerCAmelCase_="relu" , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Union[str, Any]: super().__init__(**lowerCAmelCase_ ) if layer_type not in self.layer_types: raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' ) _A = num_channels _A = embedding_size _A = hidden_sizes _A = depths _A = layer_type _A = hidden_act _A = downsample_in_first_stage _A = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase_ ) + 1 )] _A , _A = get_aligned_output_features_output_indices( out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names ) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Optional[Any] = version.parse('''1.11''' ) @property def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase ( self ) -> float: return 1E-3
180
1
"""simple docstring""" from typing import Any class __A : '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : Any ) ->int: """simple docstring""" snake_case_ = data snake_case_ = None class __A : '''simple docstring''' def __init__( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = None def lowerCAmelCase ( self : Any ) ->int: """simple docstring""" snake_case_ = self.head while temp is not None: print(temp.data , end=""" """ ) snake_case_ = temp.next print() def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Any ) ->Dict: """simple docstring""" snake_case_ = Node(UpperCAmelCase_ ) snake_case_ = self.head snake_case_ = new_node def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" if node_data_a == node_data_a: return else: snake_case_ = self.head while node_a is not None and node_a.data != node_data_a: snake_case_ = node_a.next snake_case_ = self.head while node_a is not None and node_a.data != node_data_a: snake_case_ = node_a.next if node_a is None or node_a is None: return snake_case_ = node_a.data, node_a.data if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('After swapping') ll.print_list()
365
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: List[Any] = CycleDiffusionPipeline __lowercase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { """negative_prompt""", """height""", """width""", """negative_prompt_embeds""", } __lowercase: str = PipelineTesterMixin.required_optional_params - {"""latents"""} __lowercase: Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""}) __lowercase: Any = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowercase: Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) snake_case_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , ) torch.manual_seed(0 ) snake_case_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) snake_case_ = CLIPTextModel(UpperCAmelCase_ ) snake_case_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str]=0 ) ->str: """simple docstring""" snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) snake_case_ = image / 2 + 0.5 if str(UpperCAmelCase_ ).startswith("""mps""" ): snake_case_ = torch.manual_seed(UpperCAmelCase_ ) else: snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) snake_case_ = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" 
snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase_ ) snake_case_ = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ = pipe(**UpperCAmelCase_ ) snake_case_ = output.images snake_case_ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) snake_case_ = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def lowerCAmelCase ( self : Union[str, Any] ) ->str: """simple docstring""" snake_case_ = self.get_dummy_components() for name, module in components.items(): if hasattr(UpperCAmelCase_ , """half""" ): snake_case_ = module.half() snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase_ ) snake_case_ = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ = pipe(**UpperCAmelCase_ ) snake_case_ = output.images snake_case_ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) snake_case_ = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Tuple ) ->Optional[int]: """simple docstring""" return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Tuple ) ->List[Any]: """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : Any ) ->Tuple: """simple docstring""" return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : List[Any] ) ->Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) snake_case_ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) snake_case_ = init_image.resize((512, 512) ) snake_case_ = """CompVis/stable-diffusion-v1-4""" snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder="""scheduler""" ) snake_case_ = CycleDiffusionPipeline.from_pretrained( UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() snake_case_ = """A black colored car""" snake_case_ = """A blue colored car""" snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=UpperCAmelCase_ , source_prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , 
source_guidance_scale=1 , generator=UpperCAmelCase_ , output_type="""np""" , ) snake_case_ = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) snake_case_ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) snake_case_ = init_image.resize((512, 512) ) snake_case_ = """CompVis/stable-diffusion-v1-4""" snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder="""scheduler""" ) snake_case_ = CycleDiffusionPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() snake_case_ = """A black colored car""" snake_case_ = """A blue colored car""" snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=UpperCAmelCase_ , source_prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase_ , output_type="""np""" , ) snake_case_ = output.images assert np.abs(image - expected_image ).max() < 2E-2
233
0
import re def A_ ( snake_case : Optional[int] ) -> bool: '''simple docstring''' __UpperCamelCase = re.compile( r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' ) return bool(re.search(__UpperCAmelCase , __UpperCAmelCase ) ) if __name__ == "__main__": lowercase__ : Any = '0094702343221' print(is_sri_lankan_phone_number(phone))
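To illustrate what the regex in the row above accepts (the sample numbers below are made up for demonstration): a country-code prefix of 0, 94, +94, or 0094, then a 7 plus an operator digit, an optional separator, and seven more digits.

import re

pattern = re.compile(
    r"^(?:0|94|\+94|0{2}94)"  # country-code prefix variants
    r"7(0|1|2|4|5|6|7|8)"  # leading 7 plus operator digit
    r"(-| |)"  # optional separator
    r"\d{7}$"  # seven-digit subscriber number
)

for number in ("0094702343221", "+94713427584", "0112345678"):
    print(number, bool(pattern.search(number)))
# The first two match; the landline-style third does not.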
328
'''simple docstring''' from __future__ import annotations import math def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(__UpperCAmelCase ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) return min( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) def __magic_name__ ( ) -> None: '''simple docstring''' snake_case_ = [90, 23, 6, 33, 21, 65, 123, 3_4423] snake_case_ = math.log(len(__UpperCAmelCase ), 2 ) print('''Optimal value : ''', end='''''' ) print(minimax(0, 0, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
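For the sample scores in the minimax row above, the returned value can be traced level by level (a worked check, independent of the row's code): the tree has height log2(8) = 3, maximizing at even depths and minimizing at odd ones.

scores = [90, 23, 6, 33, 21, 65, 123, 34423]  # the leaves

# depth 2 (maximizing): pairwise maxima of the leaves
level2 = [max(scores[i], scores[i + 1]) for i in range(0, 8, 2)]  # [90, 33, 65, 34423]
# depth 1 (minimizing)
level1 = [min(level2[0], level2[1]), min(level2[2], level2[3])]  # [33, 65]
# depth 0 (maximizing) -- the value the top-level minimax call reports
print(max(level1))  # 65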
56
0
from math import factorial def lowerCamelCase_ ( UpperCamelCase__ : int = 100 ): '''simple docstring''' return sum(int(lowerCAmelCase__ ) for x in str(factorial(lowerCAmelCase__ ) ) ) if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
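The factorial-digit-sum row above is the Project Euler 20 pattern. A worked check on a small input: 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.

from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27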
365
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig lowercase = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } lowercase = logging.get_logger(__name__) class __lowercase ( A ): '''simple docstring''' _A : Any = '''maskformer''' _A : Any = {'''hidden_size''': '''mask_feature_size'''} _A : List[str] = ['''resnet''', '''swin'''] _A : Tuple = ['''detr'''] def __init__( self : Optional[Any] , _a : int = 256 , _a : int = 256 , _a : float = 0.1 , _a : bool = False , _a : Optional[Dict] = None , _a : Optional[Dict] = None , _a : float = 0.02 , _a : float = 1.0 , _a : float = 1.0 , _a : float = 1.0 , _a : float = 20.0 , _a : Optional[bool] = None , **_a : List[str] , ): if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k UpperCamelCase__ = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(_a , _a ): UpperCamelCase__ = backbone_config.pop('''model_type''' ) UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type] UpperCamelCase__ = config_class.from_dict(_a ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """ F"""Supported model types: {",".join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 UpperCamelCase__ = DetrConfig() else: # verify that the decoder is supported UpperCamelCase__ = ( decoder_config.pop('''model_type''' ) if isinstance(_a , _a ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F"""Transformer Decoder {decoder_type} not supported, please use one of""" F""" {",".join(self.decoders_supported )}""" ) if isinstance(_a , _a ): UpperCamelCase__ = CONFIG_MAPPING[decoder_type] UpperCamelCase__ = config_class.from_dict(_a ) UpperCamelCase__ = backbone_config UpperCamelCase__ = decoder_config # main feature dimension for the model UpperCamelCase__ = fpn_feature_size UpperCamelCase__ = mask_feature_size # initializer UpperCamelCase__ = init_std UpperCamelCase__ = init_xavier_std # Hungarian matcher && loss UpperCamelCase__ = cross_entropy_weight UpperCamelCase__ = dice_weight UpperCamelCase__ = mask_weight UpperCamelCase__ = use_auxiliary_loss UpperCamelCase__ = no_object_weight UpperCamelCase__ = output_auxiliary_logits UpperCamelCase__ = self.decoder_config.encoder_attention_heads UpperCamelCase__ = self.decoder_config.num_hidden_layers super().__init__(**_a ) @classmethod def A_ ( cls : Tuple , _a : PretrainedConfig , _a : PretrainedConfig , **_a : str ): return cls( backbone_config=_a , decoder_config=_a , **_a , ) def A_ ( self : str ): UpperCamelCase__ = copy.deepcopy(self.__dict__ ) UpperCamelCase__ = self.backbone_config.to_dict() UpperCamelCase__ = self.decoder_config.to_dict() UpperCamelCase__ = self.__class__.model_type return output
35
0
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _snake_case ( _lowercase ): lowerCamelCase__: List[Any] = ["vqvae"] def __init__( self: Tuple , __lowerCamelCase: AutoencoderKL , __lowerCamelCase: UNetaDConditionModel , __lowerCamelCase: Mel , __lowerCamelCase: Union[DDIMScheduler, DDPMScheduler] , ) -> str: super().__init__() self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] ) -> int: return 50 if isinstance(self.scheduler , __lowerCamelCase ) else 10_00 @torch.no_grad() def __call__( self: Optional[int] , __lowerCamelCase: int = 1 , __lowerCamelCase: str = None , __lowerCamelCase: np.ndarray = None , __lowerCamelCase: int = 0 , __lowerCamelCase: int = 0 , __lowerCamelCase: int = None , __lowerCamelCase: torch.Generator = None , __lowerCamelCase: float = 0 , __lowerCamelCase: float = 0 , __lowerCamelCase: torch.Generator = None , __lowerCamelCase: float = 0 , __lowerCamelCase: torch.Tensor = None , __lowerCamelCase: torch.Tensor = None , __lowerCamelCase: int=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: __UpperCAmelCase : Tuple = steps or self.get_default_steps() self.scheduler.set_timesteps(__lowerCamelCase ) __UpperCAmelCase : Any = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __UpperCAmelCase : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __UpperCAmelCase : Union[str, Any] = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowerCamelCase , device=self.device , ) __UpperCAmelCase : List[Any] = noise __UpperCAmelCase : Dict = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowerCamelCase , __lowerCamelCase ) __UpperCAmelCase : List[Any] = self.mel.audio_slice_to_image(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape( (input_image.height, input_image.width) ) __UpperCAmelCase : Optional[Any] = (input_image / 2_55) * 2 - 1 __UpperCAmelCase : int = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __UpperCAmelCase : Dict = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample( generator=__lowerCamelCase )[0] __UpperCAmelCase : Any = self.vqvae.config.scaling_factor * input_images if start_step > 0: __UpperCAmelCase : Any = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] ) __UpperCAmelCase : Tuple = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __UpperCAmelCase : Tuple = int(mask_start_secs * pixels_per_second ) __UpperCAmelCase : Dict = int(mask_end_secs * pixels_per_second ) __UpperCAmelCase : Optional[int] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in 
enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowerCamelCase ): __UpperCAmelCase : Any = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"] else: __UpperCAmelCase : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] if isinstance(self.scheduler , __lowerCamelCase ): __UpperCAmelCase : str = self.scheduler.step( model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] else: __UpperCAmelCase : List[str] = self.scheduler.step( model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] if mask is not None: if mask_start > 0: __UpperCAmelCase : Union[str, Any] = mask[:, step, :, :mask_start] if mask_end > 0: __UpperCAmelCase : List[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __UpperCAmelCase : Optional[int] = 1 / self.vqvae.config.scaling_factor * images __UpperCAmelCase : Dict = self.vqvae.decode(__lowerCamelCase )["sample"] __UpperCAmelCase : Dict = (images / 2 + 0.5).clamp(0 , 1 ) __UpperCAmelCase : Optional[int] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __UpperCAmelCase : str = (images * 2_55).round().astype("uint8" ) __UpperCAmelCase : List[Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) ) __UpperCAmelCase : Optional[int] = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) ) @torch.no_grad() def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[Image.Image] , __lowerCamelCase: int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , __lowerCamelCase ) self.scheduler.set_timesteps(__lowerCamelCase ) __UpperCAmelCase : Any = np.array( [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) __UpperCAmelCase : Dict = (sample / 2_55) * 2 - 1 __UpperCAmelCase : int = torch.Tensor(__lowerCamelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __UpperCAmelCase : Union[str, Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __UpperCAmelCase : Union[str, Any] = self.scheduler.alphas_cumprod[t] __UpperCAmelCase : Dict = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __UpperCAmelCase : Tuple = 1 - alpha_prod_t __UpperCAmelCase : int = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] __UpperCAmelCase : Any = (1 - alpha_prod_t_prev) ** 0.5 * model_output __UpperCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __UpperCAmelCase : Tuple = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _lowerCamelCase ( __lowerCamelCase: torch.Tensor , __lowerCamelCase: torch.Tensor , __lowerCamelCase: float ) -> torch.Tensor: __UpperCAmelCase : Dict = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) ) return sin((1 - alpha) * theta ) 
* xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
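The audio-diffusion row above ends with a spherical-linear-interpolation (slerp) helper: it mixes two tensors along the arc between them, weighting each by sin((1 - alpha) * theta) / sin(theta) and sin(alpha * theta) / sin(theta). A standalone check of that formula on two orthogonal unit vectors (illustrative values only):

import torch
from math import acos, sin

xa = torch.tensor([1.0, 0.0])
xb = torch.tensor([0.0, 1.0])
theta = acos(float(torch.dot(xa, xb) / (torch.norm(xa) * torch.norm(xb))))  # pi / 2
alpha = 0.5
mix = sin((1 - alpha) * theta) / sin(theta) * xa + sin(alpha * theta) / sin(theta) * xb
print(mix)  # tensor([0.7071, 0.7071]) -- the midpoint stays on the unit circle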
157
from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> tuple[int | None, int | None, float]: if not arr: return None, None, 0 if low == high: return low, high, arr[low] __UpperCAmelCase : List[str] = (low + high) // 2 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = max_subarray(snake_case__, snake_case__, snake_case__ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = max_subarray(snake_case__, mid + 1, snake_case__ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = max_cross_sum(snake_case__, snake_case__, snake_case__, snake_case__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> tuple[int, int, float]: __UpperCAmelCase , __UpperCAmelCase : Any = float("-inf" ), -1 __UpperCAmelCase , __UpperCAmelCase : Dict = float("-inf" ), -1 __UpperCAmelCase : int | float = 0 for i in range(snake_case__, low - 1, -1 ): summ += arr[i] if summ > left_sum: __UpperCAmelCase : Optional[int] = summ __UpperCAmelCase : Optional[Any] = i __UpperCAmelCase : List[Any] = 0 for i in range(mid + 1, high + 1 ): summ += arr[i] if summ > right_sum: __UpperCAmelCase : List[str] = summ __UpperCAmelCase : Dict = i return max_left, max_right, (left_sum + right_sum) def _UpperCamelCase ( snake_case__ ) -> float: __UpperCAmelCase : Optional[int] = [randint(1, snake_case__ ) for _ in range(snake_case__ )] __UpperCAmelCase : Optional[int] = time.time() max_subarray(snake_case__, 0, input_size - 1 ) __UpperCAmelCase : List[str] = time.time() return end - start def _UpperCamelCase ( ) -> None: __UpperCAmelCase : str = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000] __UpperCAmelCase : Optional[Any] = [time_max_subarray(snake_case__ ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(snake_case__, snake_case__ ): print(snake_case__, "\t\t", snake_case__ ) plt.plot(snake_case__, snake_case__ ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
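The style-context row above benchmarks a divide-and-conquer maximum-subarray routine. A usage sketch on the classic test vector (assuming the original name `max_subarray`, which the row's own recursive calls retain even though its `def` lines are obfuscated):

# The best contiguous run of this array is [4, -1, 2, 1], indices 3..6, sum 6.
arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
low, high, best = max_subarray(arr, 0, len(arr) - 1)
print(low, high, best)  # expected: 3 6 6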
157
1
from math import isclose, sqrt def lowerCamelCase__ ( a , a , a ) -> tuple[float, float, float]: _A: List[str] = point_y / 4 / point_x _A: str = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) _A: Any = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) _A: Dict = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 _A: Tuple = outgoing_gradient**2 + 4 _A: Any = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) _A: Union[str, Any] = (point_y - outgoing_gradient * point_x) ** 2 - 1_00 _A: List[str] = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) _A: Optional[int] = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point _A: Union[str, Any] = x_minus if isclose(a , a ) else x_plus _A: Union[str, Any] = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def lowerCamelCase__ ( a = 1.4 , a = -9.6 ) -> int: _A: int = 0 _A: float = first_x_coord _A: float = first_y_coord _A: float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): _A , _A , _A: List[str] = next_point(a , a , a ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F"""{solution() = }""")
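The row above is the Project Euler 144 setup (laser reflections inside the ellipse 4x^2 + y^2 = 100). A quick standalone check that the stated first impact point lies on that ellipse, and of the normal slope the row's reflection step mirrors the beam around:

x, y = 1.4, -9.6
# 4 * 1.4^2 + 9.6^2 = 7.84 + 92.16 = 100, so the point is on the ellipse.
assert abs(4 * x * x + y * y - 100) < 1e-9
# Implicit differentiation of 4x^2 + y^2 = 100 gives a tangent slope of -4x/y,
# so the normal slope is y / (4x) -- exactly the `point_y / 4 / point_x`
# expression at the top of the row's reflection function.
print(y / (4 * x))  # -12/7, roughly -1.714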
301
from typing import TYPE_CHECKING from ..utils import _LazyModule UpperCAmelCase__ : Tuple = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
301
1
from __future__ import annotations def A ( _SCREAMING_SNAKE_CASE ) -> int: # preprocessing the first row for i in range(1 ,len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 ,len(_SCREAMING_SNAKE_CASE ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 ,len(_SCREAMING_SNAKE_CASE ) ): for j in range(1 ,len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
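A worked example for the in-place path-sum DP above (the function is literally named `A` in this row): the cheapest monotone right/down path through the grid below is 1 -> 3 -> 1 -> 1 -> 1 = 7.

grid = [[1, 3, 1],
        [1, 5, 1],
        [4, 2, 1]]
# Note: the function overwrites `grid` with cumulative path costs.
print(A(grid))  # 7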
48
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowercase_ = logging.get_logger(__name__) class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ): _a = ["""input_features"""] def __init__( self , lowerCAmelCase=80 , lowerCAmelCase=16_000 , lowerCAmelCase=160 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=0.0 , lowerCAmelCase=False , **lowerCAmelCase , ) -> Any: '''simple docstring''' super().__init__( feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) _lowercase =n_fft _lowercase =hop_length _lowercase =chunk_length _lowercase =chunk_length * sampling_rate _lowercase =self.n_samples // hop_length _lowercase =sampling_rate _lowercase =mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ) def A__ ( self , lowerCAmelCase ) -> np.ndarray: '''simple docstring''' _lowercase =spectrogram( lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , ) _lowercase =log_spec[:, :-1] _lowercase =np.maximum(lowerCAmelCase , log_spec.max() - 8.0 ) _lowercase =(log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def A__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: _lowercase =np.array(lowerCAmelCase , np.intaa ) _lowercase =[] for vector, length in zip(lowerCAmelCase , attention_mask.sum(-1 ) ): _lowercase =(vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: _lowercase =padding_value normed_input_values.append(lowerCAmelCase ) else: _lowercase =[(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self , lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "max_length" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) _lowercase =isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowercase =is_batched_numpy or ( isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ): _lowercase =np.asarray(lowerCAmelCase , dtype=np.floataa ) elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase =raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase =[np.asarray([raw_speech] ).T] _lowercase =BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding _lowercase =self.pad( lowerCAmelCase , padding=lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _lowercase =self.zero_mean_unit_var_norm( padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , ) _lowercase =np.stack(padded_inputs['input_features'] , axis=0 ) # make sure list is in array format _lowercase =padded_inputs.get('input_features' ).transpose(2 , 0 , 1 ) _lowercase =[self._np_extract_fbank_features(lowerCAmelCase ) for waveform in input_features[0]] if isinstance(input_features[0] , lowerCAmelCase ): _lowercase =[np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in input_features] else: _lowercase =input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _lowercase =padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: _lowercase =padded_inputs.convert_to_tensors(lowerCAmelCase ) return padded_inputs def A__ ( self ) -> Dict[str, Any]: '''simple docstring''' _lowercase =copy.deepcopy(self.__dict__ ) _lowercase =self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
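The Whisper-style feature extractor above includes a `zero_mean_unit_var_norm` step. Its arithmetic is easy to check in isolation: subtract the mean and divide by the square root of the epsilon-padded variance, which leaves the vector with mean near 0 and variance near 1.

import numpy as np

x = np.array([2.0, 4.0, 6.0, 8.0])
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
print(normed.mean(), normed.var())  # ~0.0 and ~1.0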
205
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Dict = { "configuration_instructblip": [ "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "InstructBlipConfig", "InstructBlipQFormerConfig", "InstructBlipVisionConfig", ], "processing_instructblip": ["InstructBlipProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = [ "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "InstructBlipQFormerModel", "InstructBlipPreTrainedModel", "InstructBlipForConditionalGeneration", "InstructBlipVisionModel", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys _lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
272
"""simple docstring""" from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata _lowercase : List[str] = "" if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"): class __SCREAMING_SNAKE_CASE ( tr.AbstractTransform ): '''simple docstring''' def __init__( self : List[Any], lowerCamelCase : str = " " )-> List[str]: lowerCamelCase__ : List[str] =sentence_delimiter def snake_case ( self : Any, lowerCamelCase : str )-> Optional[Any]: return list(lowerCamelCase ) def snake_case ( self : Optional[Any], lowerCamelCase : List[str] )-> Tuple: lowerCamelCase__ : Optional[int] =[] for sent_idx, sentence in enumerate(lowerCamelCase ): chars.extend(self.process_string(lowerCamelCase ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase ) - 1: chars.append(self.sentence_delimiter ) return chars _lowercase : Optional[int] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: _lowercase : List[str] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) _lowercase : Dict = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" _lowercase : List[Any] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n" _lowercase : Dict = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def snake_case ( self : Dict )-> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/jitsi/jiwer/'''], reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ], ) def snake_case ( self : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=False )-> List[Any]: if concatenate_texts: return jiwer.compute_measures( lowerCamelCase, lowerCamelCase, truth_transform=lowerCamelCase, hypothesis_transform=lowerCamelCase, )["wer"] lowerCamelCase__ : Optional[Any] =0 lowerCamelCase__ : Union[str, Any] =0 for prediction, reference in zip(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : int =jiwer.compute_measures( lowerCamelCase, lowerCamelCase, truth_transform=lowerCamelCase, hypothesis_transform=lowerCamelCase, ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
272
1
from __future__ import annotations from math import gcd def a__ ( __UpperCamelCase , __UpperCamelCase = 2 , __UpperCamelCase = 1 , __UpperCamelCase = 3 , ): # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError("The input value cannot be less than 2" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: return (pow(__UpperCamelCase , 2 ) + step) % modulus for _ in range(__UpperCamelCase ): # These track the position within the cycle detection logic. SCREAMING_SNAKE_CASE_ = seed SCREAMING_SNAKE_CASE_ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. SCREAMING_SNAKE_CASE_ = rand_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = rand_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = rand_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. SCREAMING_SNAKE_CASE_ = gcd(hare - tortoise , __UpperCamelCase ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. SCREAMING_SNAKE_CASE_ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse A : Dict = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) A : Dict = parser.parse_args() A : Union[str, Any] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f"{args.num} is probably prime") else: A : Optional[Any] = args.num // divisor print(f"{args.num} = {divisor} * {quotient}")
118
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''char''' lowerCamelCase__ = '''bpe''' lowerCamelCase__ = '''wp''' A : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = ['''image_processor''', '''char_tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = '''MgpstrTokenizer''' def __init__( self : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : int=None , **__magic_name__ : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __magic_name__ , ) SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) SCREAMING_SNAKE_CASE_ = tokenizer SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("gpt2" ) SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__magic_name__ , __magic_name__ ) def __call__( self : Dict , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Dict=None , **__magic_name__ : Tuple ) -> int: if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if text is not None: SCREAMING_SNAKE_CASE_ = self.char_tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE_ = encodings["input_ids"] return inputs def __A ( self : Tuple , __magic_name__ : int ) -> Any: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = sequences SCREAMING_SNAKE_CASE_ = char_preds.size(0 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "char" ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "bpe" ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "wp" ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for i in range(__magic_name__ ): SCREAMING_SNAKE_CASE_ = [char_scores[i], bpe_scores[i], wp_scores[i]] SCREAMING_SNAKE_CASE_ = [char_strs[i], bpe_strs[i], wp_strs[i]] SCREAMING_SNAKE_CASE_ = scores.index(max(__magic_name__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = final_strs SCREAMING_SNAKE_CASE_ = final_scores SCREAMING_SNAKE_CASE_ = char_strs SCREAMING_SNAKE_CASE_ = bpe_strs SCREAMING_SNAKE_CASE_ = wp_strs return out def __A ( self : int , __magic_name__ : List[Any] , __magic_name__ : str ) -> Any: if format == DecodeType.CHARACTER: SCREAMING_SNAKE_CASE_ = self.char_decode SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = "[s]" elif format == DecodeType.BPE: SCREAMING_SNAKE_CASE_ = self.bpe_decode SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = "#" elif format == DecodeType.WORDPIECE: SCREAMING_SNAKE_CASE_ = self.wp_decode SCREAMING_SNAKE_CASE_ = 102 SCREAMING_SNAKE_CASE_ = "[SEP]" else: raise ValueError(F'''Format {format} is not supported.''' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], [] SCREAMING_SNAKE_CASE_ = pred_logits.size(0 ) SCREAMING_SNAKE_CASE_ = pred_logits.size(1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pred_logits.topk(1 , dim=-1 , largest=__magic_name__ , sorted=__magic_name__ ) SCREAMING_SNAKE_CASE_ = preds_index.view(-1 , __magic_name__ )[:, 1:] SCREAMING_SNAKE_CASE_ = decoder(__magic_name__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.nn.functional.softmax(__magic_name__ , dim=2 ).max(dim=2 ) SCREAMING_SNAKE_CASE_ = preds_max_prob[:, 1:] for index in range(__magic_name__ ): SCREAMING_SNAKE_CASE_ = preds_str[index].find(__magic_name__ ) SCREAMING_SNAKE_CASE_ = preds_str[index][:pred_eos] SCREAMING_SNAKE_CASE_ = preds_index[index].cpu().tolist() SCREAMING_SNAKE_CASE_ = pred_index.index(__magic_name__ ) if eos_token in pred_index else -1 SCREAMING_SNAKE_CASE_ = preds_max_prob[index][: pred_eos_index + 1] SCREAMING_SNAKE_CASE_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__magic_name__ ) conf_scores.append(__magic_name__ ) return dec_strs, conf_scores def __A ( self : Any , __magic_name__ : Dict ) -> List[str]: SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__magic_name__ )] return decode_strs def __A ( self : Any , __magic_name__ : Union[str, Any] ) -> Tuple: return self.bpe_tokenizer.batch_decode(__magic_name__ ) def __A ( self : str , __magic_name__ : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in 
self.wp_tokenizer.batch_decode(__magic_name__ )] return decode_strs
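# A minimal, self-contained sketch of the best-of-three selection that
# batch_decode above performs: per sample, keep whichever of the character,
# BPE, and wordpiece heads produced the highest cumulative score. The
# strings and scores below are made-up stand-ins, not real model outputs.
char_strs, bpe_strs, wp_strs = ["ticket", "ticke7"], ["tlcket", "ticket"], ["ticket", "ticket"]
char_scores, bpe_scores, wp_scores = [0.91, 0.40], [0.55, 0.88], [0.87, 0.86]

final_strs, final_scores = [], []
for i in range(len(char_strs)):
    scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
    strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
    best = scores.index(max(scores))  # argmax over the three decoding heads
    final_strs.append(strs[best])
    final_scores.append(scores[best])

print(final_strs)    # ['ticket', 'ticket']
print(final_scores)  # [0.91, 0.88]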
118
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
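# A stripped-down illustration of the deferred-import mechanism behind
# _LazyModule: nothing is imported until an exported symbol is first
# accessed. The class below is a simplified stand-in, not the real
# transformers implementation.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):  # only called for attributes not yet set
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value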
354
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE_:Any = { """configuration_mobilenet_v2""": [ """MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileNetV2Config""", """MobileNetV2OnnxConfig""", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:List[Any] = ["""MobileNetV2FeatureExtractor"""] SCREAMING_SNAKE_CASE_:Tuple = ["""MobileNetV2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Any = [ """MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileNetV2ForImageClassification""", """MobileNetV2ForSemanticSegmentation""", """MobileNetV2Model""", """MobileNetV2PreTrainedModel""", """load_tf_weights_in_mobilenet_v2""", ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
115
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) class A__ ( snake_case__ ): """simple docstring""" __magic_name__ = ['pixel_values'] def __init__( self , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BICUBIC , __snake_case = True , __snake_case = 1 / 2_5_5 , __snake_case = True , __snake_case = None , __snake_case = None , __snake_case = True , **__snake_case , ): super().__init__(**__snake_case ) snake_case = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4} snake_case = get_size_dict(__snake_case , default_to_square=__snake_case ) snake_case = do_resize snake_case = size snake_case = resample snake_case = do_rescale snake_case = rescale_factor snake_case = do_normalize snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case = image_std if image_std is not None else OPENAI_CLIP_STD snake_case = do_convert_rgb def a_ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BICUBIC , __snake_case = None , **__snake_case , ): snake_case = get_size_dict(__snake_case , default_to_square=__snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) snake_case = (size['''height'''], size['''width''']) return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case ) def a_ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ): return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case = None , **__snake_case , ): return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def a_ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ): snake_case = do_resize if do_resize is not None else self.do_resize snake_case = resample if resample is not None else self.resample snake_case = do_rescale if do_rescale is not None else self.do_rescale snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case = do_normalize if do_normalize is not None else self.do_normalize snake_case = image_mean if image_mean is not None else self.image_mean snake_case = image_std if image_std is not None else self.image_std snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case = size if size is not None else self.size snake_case = get_size_dict(__snake_case , default_to_square=__snake_case ) snake_case = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case = [convert_to_rgb(__snake_case ) for image in images] # All transformations expect numpy arrays. snake_case = [to_numpy_array(__snake_case ) for image in images] if do_resize: snake_case = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images] if do_rescale: snake_case = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images] if do_normalize: snake_case = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images] snake_case = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] snake_case = BatchFeature(data={'''pixel_values''': images} , tensor_type=__snake_case ) return encoded_outputs
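# A numpy-only sketch of the rescale -> normalize -> channel-reorder steps the
# preprocess method above chains together. The mean/std values are the
# published OPENAI_CLIP constants that the snippet imports.
import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, size=(384, 384, 3)).astype(np.float32)  # dummy HWC image
image = image * (1 / 255)                              # rescale: [0, 255] -> [0, 1]
image = (image - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD   # per-channel normalize
image = image.transpose(2, 0, 1)                       # HWC -> CHW (ChannelDimension.FIRST)
print(image.shape)  # (3, 384, 384)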
127
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Optional[Any] = { "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json", "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json", "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json", "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class A__ ( snake_case__ ): """simple docstring""" __magic_name__ = 'mobilenet_v2' def __init__( self , __snake_case=3 , __snake_case=2_2_4 , __snake_case=1.0 , __snake_case=8 , __snake_case=8 , __snake_case=6 , __snake_case=3_2 , __snake_case=True , __snake_case=True , __snake_case="relu6" , __snake_case=True , __snake_case=0.8 , __snake_case=0.02 , __snake_case=0.001 , __snake_case=2_5_5 , **__snake_case , ): super().__init__(**__snake_case ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) snake_case = num_channels snake_case = image_size snake_case = depth_multiplier snake_case = depth_divisible_by snake_case = min_depth snake_case = expand_ratio snake_case = output_stride snake_case = first_layer_is_expansion snake_case = finegrained_output snake_case = hidden_act snake_case = tf_padding snake_case = classifier_dropout_prob snake_case = initializer_range snake_case = layer_norm_eps snake_case = semantic_loss_ignore_index class A__ ( snake_case__ ): """simple docstring""" __magic_name__ = version.parse('1.11' ) @property def a_ ( self ): return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def a_ ( self ): if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def a_ ( self ): return 1E-4
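# The config above exposes depth_multiplier, depth_divisible_by and min_depth;
# in MobileNet-style models these usually combine via the standard
# "make_divisible" rounding rule. The helper below is an assumed sketch of
# that rule, not code taken from the modeling file.
def make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value


# e.g. scaling a 32-channel stem with depth_multiplier=0.75
print(make_divisible(32 * 0.75, divisor=8, min_value=8))  # 24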
127
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = ['''pixel_values'''] def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_5_5 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> None: super().__init__(**_UpperCamelCase ) UpperCAmelCase_ : str = size if size is not None else {'height': 2_2_4, 'width': 2_2_4} UpperCAmelCase_ : List[Any] = get_size_dict(_UpperCamelCase ) UpperCAmelCase_ : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} UpperCAmelCase_ : Optional[int] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='crop_size' ) UpperCAmelCase_ : Dict = do_resize UpperCAmelCase_ : List[str] = do_rescale UpperCAmelCase_ : Any = do_normalize UpperCAmelCase_ : Tuple = do_center_crop UpperCAmelCase_ : Any = crop_size UpperCAmelCase_ : Optional[int] = size UpperCAmelCase_ : List[Any] = resample UpperCAmelCase_ : Dict = rescale_factor UpperCAmelCase_ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray: UpperCAmelCase_ : int = get_size_dict(_UpperCamelCase ) if "shortest_edge" in size: UpperCAmelCase_ : int = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: UpperCAmelCase_ : Tuple = (size['height'], size['width']) else: raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray: UpperCAmelCase_ : Any = get_size_dict(_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> np.ndarray: return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray: return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ) -> BatchFeature: UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : Any = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : int = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : Tuple = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : Optional[int] = get_size_dict(_UpperCamelCase , param_name='crop_size' , default_to_square=_UpperCamelCase ) UpperCAmelCase_ : Any = resample if resample is not None else self.resample UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : Any = image_std if image_std is not None else self.image_std UpperCAmelCase_ : List[str] = size if size is not None else self.size UpperCAmelCase_ : Union[str, Any] = get_size_dict(_UpperCamelCase ) if not is_batched(_UpperCamelCase ): UpperCAmelCase_ : Dict = [images] if not valid_images(_UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : Any = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: UpperCAmelCase_ : List[Any] = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_center_crop: UpperCAmelCase_ : Tuple = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images] if do_rescale: UpperCAmelCase_ : Union[str, Any] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: UpperCAmelCase_ : int = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] UpperCAmelCase_ : Tuple = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] UpperCAmelCase_ : Tuple = {'pixel_values': images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
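# The resize method above accepts either explicit height/width or a
# "shortest_edge" spec. A simplified sketch of the aspect-preserving rule
# that get_resize_output_image_size is assumed to apply (no max-size cap):
def shortest_edge_size(height, width, shortest_edge):
    short, long = min(height, width), max(height, width)
    new_long = round(long * shortest_edge / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)


print(shortest_edge_size(480, 640, 224))  # (224, 299)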
365
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = Dict[str, Any] __UpperCAmelCase = List[Prediction] @add_end_docstrings(_snake_case ) class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) requires_backends(self , 'vision' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Tuple: UpperCAmelCase_ : str = {} if "threshold" in kwargs: UpperCAmelCase_ : Tuple = kwargs['threshold'] return {}, {}, postprocess_kwargs def __call__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Union[Predictions, List[Prediction]]: return super().__call__(*_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : Any = load_image(_UpperCamelCase ) UpperCAmelCase_ : Any = torch.IntTensor([[image.height, image.width]] ) UpperCAmelCase_ : str = self.image_processor(images=[image] , return_tensors='pt' ) if self.tokenizer is not None: UpperCAmelCase_ : List[str] = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' ) UpperCAmelCase_ : Any = target_size return inputs def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Any = model_inputs.pop('target_size' ) UpperCAmelCase_ : Optional[Any] = self.model(**_UpperCamelCase ) UpperCAmelCase_ : Dict = outputs.__class__({'target_size': target_size, **outputs} ) if self.tokenizer is not None: UpperCAmelCase_ : List[str] = model_inputs['bbox'] return model_outputs def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0.9 ) -> List[str]: UpperCAmelCase_ : List[Any] = model_outputs['target_size'] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
UpperCAmelCase_ , UpperCAmelCase_ : str = target_size[0].tolist() def unnormalize(_UpperCamelCase ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_0_0_0), (height * bbox[1] / 1_0_0_0), (width * bbox[2] / 1_0_0_0), (height * bbox[3] / 1_0_0_0), ] ) ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) UpperCAmelCase_ : Union[str, Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] UpperCAmelCase_ : Tuple = [unnormalize(_UpperCamelCase ) for bbox in model_outputs['bbox'].squeeze(0 )] UpperCAmelCase_ : List[str] = ['score', 'label', 'box'] UpperCAmelCase_ : Any = [dict(zip(_UpperCamelCase , _UpperCamelCase ) ) for vals in zip(scores.tolist() , _UpperCamelCase , _UpperCamelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel UpperCAmelCase_ : Union[str, Any] = self.image_processor.post_process_object_detection(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : str = raw_annotations[0] UpperCAmelCase_ : str = raw_annotation['scores'] UpperCAmelCase_ : Tuple = raw_annotation['labels'] UpperCAmelCase_ : List[Any] = raw_annotation['boxes'] UpperCAmelCase_ : Union[str, Any] = scores.tolist() UpperCAmelCase_ : int = [self.model.config.idalabel[label.item()] for label in labels] UpperCAmelCase_ : Any = [self._get_bounding_box(_UpperCamelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] UpperCAmelCase_ : int = ['score', 'label', 'box'] UpperCAmelCase_ : Dict = [ dict(zip(_UpperCamelCase , _UpperCamelCase ) ) for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] ) ] return annotation def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict[str, int]: if self.framework != "pt": raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = box.int().tolist() UpperCAmelCase_ : str = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
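# The unnormalize closure above scales LayoutLM-style boxes, which live on a
# 0-1000 grid, back to pixel coordinates. A standalone equivalent:
def unnormalize_box(bbox, width, height):
    xmin, ymin, xmax, ymax = bbox
    return {
        "xmin": int(width * xmin / 1000),
        "ymin": int(height * ymin / 1000),
        "xmax": int(width * xmax / 1000),
        "ymax": int(height * ymax / 1000),
    }


print(unnormalize_box((100, 50, 400, 300), width=1240, height=1754))
# {'xmin': 124, 'ymin': 87, 'xmax': 496, 'ymax': 526}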
145
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : List[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = ["XGLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = ["XGLMTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = [ "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
228
from __future__ import annotations


def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    # Merge, sort, then take the middle element (odd length) or the mean of
    # the two middle elements (even length).
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
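# Quick sanity checks for median_of_two_arrays on odd- and even-length merges:
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0       # odd: the middle element
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even: mean of the middle two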
228
1
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; the loop runs once per set
        # bit rather than 32 times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
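# Why number &= number - 1 works: subtracting 1 flips the lowest set bit and
# every zero below it, so the AND clears exactly one set bit per iteration.
n = 0b10110
n &= n - 1  # 0b10110 & 0b10101 == 0b10100
assert n == 0b10100
assert get_set_bits_count(25) == 3  # 25 == 0b11001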
360
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType a : int = logging.get_logger(__name__) a : int = { """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""", } # fmt: off a : Tuple = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5, 7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7, 1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1, 4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6, 1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1, 1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9, 3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1 ] a : Optional[int] = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3, 8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7, 3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7, 7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3, 1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5, 2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5, 4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2 ] class UpperCamelCase_ ( __magic_name__ ): lowercase = 'whisper' lowercase = ['past_key_values'] lowercase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , A=51865 , A=80 , A=6 , A=4 , A=6 , A=4 , A=1536 , A=1536 , A=0.0 , A=0.0 , A=50257 , A=True , A=True , A="gelu" , A=256 , A=0.0 , A=0.0 , A=0.0 , A=0.0_2 , A=False , A=1500 , A=448 , A=50256 , A=50256 , A=50256 , A=None , A=[220, 50256] , A=False , A=256 , A=False , A=0.0_5 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A=7 , **A , ) -> Optional[Any]: UpperCAmelCase : str = vocab_size UpperCAmelCase : Union[str, Any] = num_mel_bins UpperCAmelCase : Tuple = d_model UpperCAmelCase : Optional[int] = encoder_layers UpperCAmelCase : List[str] = encoder_attention_heads UpperCAmelCase : Optional[int] = decoder_layers UpperCAmelCase : int = decoder_attention_heads UpperCAmelCase : Optional[int] = decoder_ffn_dim UpperCAmelCase : Union[str, Any] = encoder_ffn_dim UpperCAmelCase : List[str] = dropout UpperCAmelCase : Optional[Any] = attention_dropout UpperCAmelCase : Optional[Any] = activation_dropout UpperCAmelCase : Optional[Any] = activation_function UpperCAmelCase : Optional[Any] = init_std UpperCAmelCase : int = encoder_layerdrop UpperCAmelCase : Dict = decoder_layerdrop UpperCAmelCase : Optional[int] = use_cache UpperCAmelCase : List[str] = encoder_layers UpperCAmelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) 
if True UpperCAmelCase : Union[str, Any] = max_source_positions UpperCAmelCase : Tuple = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. UpperCAmelCase : List[str] = classifier_proj_size UpperCAmelCase : Optional[Any] = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase : Optional[Any] = apply_spec_augment UpperCAmelCase : int = mask_time_prob UpperCAmelCase : int = mask_time_length UpperCAmelCase : Dict = mask_time_min_masks UpperCAmelCase : List[str] = mask_feature_prob UpperCAmelCase : Optional[int] = mask_feature_length UpperCAmelCase : int = mask_feature_min_masks UpperCAmelCase : List[Any] = median_filter_width super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , suppress_tokens=A , begin_suppress_tokens=A , **A , ) class UpperCamelCase_ ( __magic_name__ ): @property def _lowercase( self ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase : str = OrderedDict( [ ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}), ] ) if self.use_past: UpperCAmelCase : List[Any] = {0: """batch"""} else: UpperCAmelCase : Dict = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(A , direction="""inputs""" ) return common_inputs def _lowercase( self , A , A = -1 , A = -1 , A = False , A = None , A = 22050 , A = 5.0 , A = 220 , ) -> Mapping[str, Any]: UpperCAmelCase : Optional[int] = OrderedDict() UpperCAmelCase : Any = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=A , framework=A , sampling_rate=A , time_duration=A , frequency=A , ) UpperCAmelCase : List[str] = encoder_inputs["""input_features"""].shape[2] UpperCAmelCase : List[Any] = encoder_sequence_length // 2 if self.use_past else seq_length UpperCAmelCase : Any = super().generate_dummy_inputs( preprocessor.tokenizer , A , A , A , A ) UpperCAmelCase : List[str] = encoder_inputs.pop("""input_features""" ) UpperCAmelCase : Any = decoder_inputs.pop("""decoder_input_ids""" ) if "past_key_values" in decoder_inputs: UpperCAmelCase : Union[str, Any] = decoder_inputs.pop("""past_key_values""" ) return dummy_inputs @property def _lowercase( self ) -> float: return 1e-3
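# A minimal sketch of what the scale_embedding comment above means: when the
# flag is set, token embeddings are scaled by sqrt(d_model) before being fed
# to the decoder (the tensor below is dummy data, not real model state).
import math

import torch

d_model = 1536  # this config's default
embed_scale = math.sqrt(d_model)
token_embeddings = torch.randn(2, 10, d_model)  # (batch, seq, d_model)
scaled_embeddings = token_embeddings * embed_scale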
338
0
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline lowerCAmelCase__ : Any = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__=False , ) -> List[str]: output_path.parent.mkdir(parents=A__ , exist_ok=A__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , use_external_data_format=A__ , enable_onnx_checker=A__ , opset_version=A__ , ) else: export( A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , opset_version=A__ , ) @torch.no_grad() def UpperCamelCase__ ( A__ , A__ , A__ , A__ = False ) -> Optional[int]: snake_case__ : Optional[Any] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): snake_case__ : Union[str, Any] = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: snake_case__ : Tuple = 'cpu' snake_case__ : Optional[int] = StableDiffusionPipeline.from_pretrained(A__ , torch_dtype=A__ ).to(A__ ) snake_case__ : Any = Path(A__ ) # TEXT ENCODER snake_case__ : Union[str, Any] = pipeline.text_encoder.config.max_position_embeddings snake_case__ : List[str] = pipeline.text_encoder.config.hidden_size snake_case__ : Union[str, Any] = pipeline.tokenizer( 'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=A__ , return_tensors='pt' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=A__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'sequence'}, } , opset=A__ , ) del pipeline.text_encoder # UNET snake_case__ : str = pipeline.unet.config.in_channels snake_case__ : List[str] = pipeline.unet.config.sample_size snake_case__ : Any = output_path / 'unet' / 'model.onnx' onnx_export( pipeline.unet , model_args=( torch.randn(2 , A__ , A__ , A__ ).to(device=A__ , dtype=A__ ), torch.randn(2 ).to(device=A__ , dtype=A__ ), torch.randn(2 , A__ , A__ ).to(device=A__ , dtype=A__ ), False, ) , output_path=A__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={ 'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, 'timestep': {0: 'batch'}, 'encoder_hidden_states': {0: 'batch', 1: 'sequence'}, } , opset=A__ , use_external_data_format=A__ , ) snake_case__ : Tuple = str(unet_path.absolute().as_posix() ) snake_case__ : Dict = os.path.dirname(A__ ) snake_case__ : Tuple = onnx.load(A__ ) # clean up existing tensor files shutil.rmtree(A__ ) os.mkdir(A__ ) # collate external tensor files into one onnx.save_model( A__ , A__ , save_as_external_data=A__ , all_tensors_to_one_file=A__ , location='weights.pb' , convert_attribute=A__ , ) del pipeline.unet # VAE ENCODER snake_case__ : Optional[int] = pipeline.vae snake_case__ : Any = 
vae_encoder.config.in_channels snake_case__ : Dict = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder snake_case__ : Tuple = lambda A__ , A__ : vae_encoder.encode(A__ , A__ )[0].sample() onnx_export( A__ , model_args=( torch.randn(1 , A__ , A__ , A__ ).to(device=A__ , dtype=A__ ), False, ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={ 'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=A__ , ) # VAE DECODER snake_case__ : Optional[Any] = pipeline.vae snake_case__ : Any = vae_decoder.config.latent_channels snake_case__ : List[str] = vae_decoder.config.out_channels # forward only through the decoder part snake_case__ : Optional[int] = vae_encoder.decode onnx_export( A__ , model_args=( torch.randn(1 , A__ , A__ , A__ ).to(device=A__ , dtype=A__ ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=A__ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: snake_case__ : Optional[int] = pipeline.safety_checker snake_case__ : Tuple = safety_checker.config.vision_config.num_channels snake_case__ : Dict = safety_checker.config.vision_config.image_size snake_case__ : Optional[int] = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , A__ , A__ , A__ , ).to(device=A__ , dtype=A__ ), torch.randn(1 , A__ , A__ , A__ ).to(device=A__ , dtype=A__ ), ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={ 'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, 'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'}, } , opset=A__ , ) del pipeline.safety_checker snake_case__ : List[Any] = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' ) snake_case__ : Tuple = pipeline.feature_extractor else: snake_case__ : Dict = None snake_case__ : Optional[int] = None snake_case__ : Optional[Any] = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=A__ , feature_extractor=A__ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(A__ ) print('ONNX pipeline saved to' , A__ ) del pipeline del onnx_pipeline snake_case__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(A__ , provider='CPUExecutionProvider' ) print('ONNX pipeline is loadable' ) if __name__ == "__main__": lowerCAmelCase__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=14, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', 
default=False, help='''Export the models in `float16` mode''') lowerCAmelCase__ : Union[str, Any] = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
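# The script already reloads its own output as a smoke test; the same two
# calls work standalone once a model has been exported ("./sd-onnx" is a
# placeholder for whatever --output_path was used).
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd-onnx", provider="CPUExecutionProvider")
image = pipe("a photo of an astronaut riding a horse").images[0]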
143
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : int = logging.get_logger(__name__) def UpperCamelCase__ ( A__ , A__=False ) -> List[Any]: snake_case__ : str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'deit.embeddings.cls_token'), ('dist_token', 'deit.embeddings.distillation_token'), ('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'deit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case__ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('norm.weight', 'deit.layernorm.weight'), ('norm.bias', 'deit.layernorm.bias'), ('head.weight', 'cls_classifier.weight'), ('head.bias', 'cls_classifier.bias'), ('head_dist.weight', 'distillation_classifier.weight'), ('head_dist.bias', 'distillation_classifier.bias'), ] ) return rename_keys def UpperCamelCase__ ( A__ , A__ , A__=False ) -> Dict: for i in range(config.num_hidden_layers ): if base_model: snake_case__ : Tuple = '' else: snake_case__ : List[Any] = 'deit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) snake_case__ : List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : int = in_proj_weight[ : config.hidden_size, : ] snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size] snake_case__ : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : Tuple = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : int = in_proj_bias[-config.hidden_size :] def UpperCamelCase__ ( A__ , A__ , A__ ) -> str: snake_case__ : Optional[int] = dct.pop(A__ ) snake_case__ : int = val def UpperCamelCase__ ( ) -> Dict: snake_case__ : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : Dict = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ ) -> List[str]: snake_case__ : List[Any] = DeiTConfig() # all deit models have fine-tuned heads snake_case__ : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case__ : Any = 1000 snake_case__ : Union[str, Any] = 'huggingface/label-files' snake_case__ : int = 'imagenet-1k-id2label.json' snake_case__ : str = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : int = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : List[Any] = idalabel snake_case__ : List[Any] = {v: k for k, v in idalabel.items()} snake_case__ : Tuple = int(deit_name[-6:-4] ) snake_case__ : str = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('tiny' ): snake_case__ : Optional[int] = 192 snake_case__ : str = 768 snake_case__ : Optional[Any] = 12 snake_case__ : Tuple = 3 elif deit_name[9:].startswith('small' ): snake_case__ : str = 384 snake_case__ : str = 1536 snake_case__ : Dict = 12 snake_case__ : str = 6 if deit_name[9:].startswith('base' ): pass elif deit_name[4:].startswith('large' ): snake_case__ : List[Any] = 1024 snake_case__ : str = 4096 snake_case__ : Tuple = 24 snake_case__ : Tuple = 16 # load original model from timm snake_case__ : Optional[int] = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : Optional[Any] = timm_model.state_dict() snake_case__ : Tuple = create_rename_keys(A__ , A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) read_in_q_k_v(A__ , A__ , A__ ) # load HuggingFace model snake_case__ : int = DeiTForImageClassificationWithTeacher(A__ ).eval() model.load_state_dict(A__ ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case__ : Union[str, Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case__ : List[Any] = DeiTImageProcessor(size=A__ , crop_size=config.image_size ) snake_case__ : Tuple = image_processor(images=prepare_img() , return_tensors='pt' ) snake_case__ : Tuple = encoding['pixel_values'] snake_case__ : Dict = model(A__ ) snake_case__ : Union[str, Any] = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1e-3 ) Path(A__ ).mkdir(exist_ok=A__ ) print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--deit_name''', default='''vit_deit_base_distilled_patch16_224''', type=str, help='''Name of the DeiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCAmelCase__ : int = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
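# The rename_key helper used above boils down to a keyed pop-and-reinsert on
# the state dict; a standalone equivalent:
def rename_key(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)


sd = {"norm.weight": [1.0]}
rename_key(sd, "norm.weight", "deit.layernorm.weight")
print(sd)  # {'deit.layernorm.weight': [1.0]}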
143
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'dinat'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
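# The hidden_size derivation in the config above, worked through for the
# defaults: channels double at every stage, so the final stage width is
# embed_dim * 2 ** (num_stages - 1).
embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 512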
93
import logging

from transformers import PretrainedConfig

logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}


class BertAbsConfig(PretrainedConfig):
    model_type = 'bertabs'

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
93
1
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers A__ : Union[str, Any] = float('''nan''') class __snake_case : def __init__( self : Tuple , A_ : Optional[Any]): lowerCAmelCase_ : str = sys.stdout lowerCAmelCase_ : Tuple = open(A_ , '''a''') def __getattr__( self : Dict , A_ : Optional[int]): return getattr(self.stdout , A_) def UpperCAmelCase__ ( self : Any , A_ : Any): self.stdout.write(A_) # strip tqdm codes self.file.write(re.sub(r'''^.*\r''' , '''''' , A_ , 0 , re.M)) def UpperCamelCase( __UpperCamelCase : str=80 ,__UpperCamelCase : int=False ): lowerCAmelCase_ : int = [] # deal with critical env vars lowerCAmelCase_ : Union[str, Any] = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: lowerCAmelCase_ : Dict = os.environ.get(__UpperCamelCase ,__UpperCamelCase ) if val is not None: cmd.append(f"""{key}={val}""" ) # python executable (not always needed if the script is executable) lowerCAmelCase_ : Tuple = sys.executable if full_python_path else sys.executable.split('''/''' )[-1] cmd.append(__UpperCamelCase ) # now the normal args cmd += list(map(shlex.quote ,sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes lowerCAmelCase_ : List[Any] = [] lowerCAmelCase_ : Dict = '''''' while len(__UpperCamelCase ) > 0: current_line += f"""{cmd.pop(0 )} """ if len(__UpperCamelCase ) == 0 or len(__UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(__UpperCamelCase ) lowerCAmelCase_ : int = '''''' return "\\\n".join(__UpperCamelCase ) def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ): # unwrap multi-line input lowerCAmelCase_ : Tuple = re.sub(R'''[\\\n]+''' ,''' ''' ,args.base_cmd ) # remove --output_dir if any and set our own lowerCAmelCase_ : Optional[Any] = re.sub('''--output_dir\s+[^\s]+''' ,'''''' ,args.base_cmd ) args.base_cmd += f""" --output_dir {output_dir}""" # ensure we have --overwrite_output_dir lowerCAmelCase_ : Any = re.sub('''--overwrite_output_dir\s+''' ,'''''' ,args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def UpperCamelCase( __UpperCamelCase : str ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 ,100 ) for k in metric_keys} ,**{target_metric_key: random.choice([nan, 1_0.3_1, 1_0_0.2, 5_5.6_6_6_6, 2_2_2.2_2_2_2_2_2_2_2] )} ,) lowerCAmelCase_ : Any = subprocess.run(__UpperCamelCase ,capture_output=__UpperCamelCase ,text=__UpperCamelCase ) if verbose: print('''STDOUT''' ,result.stdout ) print('''STDERR''' ,result.stderr ) # save the streams lowerCAmelCase_ : Tuple = variation.replace(''' ''' ,'''-''' ) with open(Path(__UpperCamelCase ) / f"""log.{prefix}.stdout.txt""" ,'''w''' ) as f: f.write(result.stdout ) with open(Path(__UpperCamelCase ) / f"""log.{prefix}.stderr.txt""" ,'''w''' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('''failed''' ) return {target_metric_key: nan} with io.open(f"""{output_dir}/all_results.json""" ,'''r''' ,encoding='''utf-8''' ) as f: lowerCAmelCase_ : List[str] = json.load(__UpperCamelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def UpperCamelCase( __UpperCamelCase : Dict ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : str ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,): lowerCAmelCase_ : Any = [] lowerCAmelCase_ : int = [] lowerCAmelCase_ : List[Any] = f"""{id}: {variation:<{longest_variation_len}}""" lowerCAmelCase_ : Tuple = f"""{preamble}: """ lowerCAmelCase_ : Optional[Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(__UpperCamelCase ) ,desc=__UpperCamelCase ,leave=__UpperCamelCase ): lowerCAmelCase_ : str = process_run_single( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowerCAmelCase_ : Tuple = single_run_metrics[target_metric_key] if not math.isnan(__UpperCamelCase ): metrics.append(__UpperCamelCase ) results.append(__UpperCamelCase ) outcome += "✓" else: outcome += "✘" lowerCAmelCase_ : List[str] = f"""\33[2K\r{outcome}""" if len(__UpperCamelCase ) > 0: lowerCAmelCase_ : Dict = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} lowerCAmelCase_ : Tuple = round(mean_metrics[target_metric_key] ,2 ) lowerCAmelCase_ : Optional[int] = f"""{outcome} {mean_target}""" if len(__UpperCamelCase ) > 1: results_str += f""" {tuple(round(__UpperCamelCase ,2 ) for x in results )}""" print(__UpperCamelCase ) lowerCAmelCase_ : Union[str, Any] = variation return mean_metrics else: print(__UpperCamelCase ) return {variation_key: variation, target_metric_key: nan} def UpperCamelCase( ): lowerCAmelCase_ : Any = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return f""" Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB """ def UpperCamelCase( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ): lowerCAmelCase_ : Optional[Any] = pd.DataFrame(__UpperCamelCase ) lowerCAmelCase_ : Union[str, Any] = '''variation''' lowerCAmelCase_ : 
List[str] = '''diff_%''' lowerCAmelCase_ : str = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan lowerCAmelCase_ : List[str] = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(__UpperCamelCase ): # as a fallback, use the minimal value as the sentinel lowerCAmelCase_ : Dict = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(__UpperCamelCase ): lowerCAmelCase_ : Any = df.apply( lambda __UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 ,axis='''columns''' ,) # re-order columns lowerCAmelCase_ : List[Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] lowerCAmelCase_ : Optional[Any] = df.reindex(__UpperCamelCase ,axis='''columns''' ) # reorder cols # capitalize lowerCAmelCase_ : Any = df.rename(str.capitalize ,axis='''columns''' ) # make the cols as narrow as possible lowerCAmelCase_ : Optional[Any] = df.rename(lambda __UpperCamelCase : c.replace('''_''' ,'''<br>''' ) ,axis='''columns''' ) lowerCAmelCase_ : Union[str, Any] = df.rename(lambda __UpperCamelCase : c.replace('''_''' ,'''\n''' ) ,axis='''columns''' ) lowerCAmelCase_ : List[str] = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=__UpperCamelCase ,floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=__UpperCamelCase ,floatfmt='''.2f''' )] print('''\n\n'''.join(__UpperCamelCase ) ) def UpperCamelCase( ): lowerCAmelCase_ : Any = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' ,default=__UpperCamelCase ,type=__UpperCamelCase ,required=__UpperCamelCase ,help='''Base cmd''' ,) parser.add_argument( '''--variations''' ,default=__UpperCamelCase ,type=__UpperCamelCase ,nargs='''+''' ,required=__UpperCamelCase ,help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' ,) parser.add_argument( '''--base-variation''' ,default=__UpperCamelCase ,type=__UpperCamelCase ,help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' ,) parser.add_argument( '''--target-metric-key''' ,default=__UpperCamelCase ,type=__UpperCamelCase ,required=__UpperCamelCase ,help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' ,) parser.add_argument( '''--report-metric-keys''' ,default='''''' ,type=__UpperCamelCase ,help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' ,) parser.add_argument( '''--repeat-times''' ,default=1 ,type=__UpperCamelCase ,help='''How many times to re-run each variation - an average will be reported''' ,) parser.add_argument( '''--output_dir''' ,default='''output_benchmark''' ,type=__UpperCamelCase ,help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' ,) parser.add_argument( '''--verbose''' ,default=__UpperCamelCase ,action='''store_true''' ,help='''Whether to show the outputs of each run or just the benchmark progress''' ,) lowerCAmelCase_ : List[Any] = parser.parse_args() lowerCAmelCase_ : Union[str, Any] = args.output_dir Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) lowerCAmelCase_ : List[Any] = get_base_command(__UpperCamelCase ,__UpperCamelCase ) # split each dimension into its --foo variations lowerCAmelCase_ : Dict = [list(map(str.strip ,re.split(R'''\|''' ,__UpperCamelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty lowerCAmelCase_ : Union[str, Any] = list(map(str.strip ,map(''' '''.join ,itertools.product(*__UpperCamelCase ) ) ) ) lowerCAmelCase_ : int = max(len(__UpperCamelCase ) for x in variations ) # split wanted keys lowerCAmelCase_ : List[str] = args.report_metric_keys.split() # capture prints into a log file for convenience lowerCAmelCase_ : Optional[int] = f"""benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt""" print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" ) print(f"""and this script's output is also piped into {report_fn}""" ) lowerCAmelCase_ : Optional[Any] = Tee(__UpperCamelCase ) print(f"""\n*** Running {len(__UpperCamelCase )} benchmarks:""" ) print(f"""Base command: {" ".join(__UpperCamelCase )}""" ) lowerCAmelCase_ : int = '''variation''' lowerCAmelCase_ : List[Any] = [] for id, variation in enumerate(tqdm(__UpperCamelCase ,desc='''Total completion: ''' ,leave=__UpperCamelCase ) ): lowerCAmelCase_ : int = base_cmd + variation.split() results.append( process_run( id + 1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,args.target_metric_key ,__UpperCamelCase ,args.repeat_times ,__UpperCamelCase ,args.verbose ,) ) process_results(__UpperCamelCase ,args.target_metric_key ,__UpperCamelCase ,args.base_variation ,__UpperCamelCase ) if __name__ == "__main__": main()
103
'''simple docstring'''


def solution(limit: int = 1_000) -> int:
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(F"{solution() = }")
35
0
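The Project Euler snippet above sums multiples of 3 or 5 below the limit by brute force. As a standalone sanity check, the same answer follows in O(1) from the arithmetic-series identity plus inclusion-exclusion; the function names here are illustrative, not from the sample:

def sum_of_multiples(k: int, limit: int) -> int:
    # Sum of k, 2k, ..., below `limit`: k * m * (m + 1) / 2 with m = (limit - 1) // k
    m = (limit - 1) // k
    return k * m * (m + 1) // 2

def solution_closed_form(limit: int = 1_000) -> int:
    # Inclusion-exclusion: multiples of 3, plus multiples of 5, minus multiples of 15
    return sum_of_multiples(3, limit) + sum_of_multiples(5, limit) - sum_of_multiples(15, limit)

assert solution_closed_form() == 233168  # matches the brute-force result for limit=1000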
"""simple docstring""" _A = 8.314_462 # Unit - J mol-1 K-1 def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
360
"""simple docstring""" from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowercase_ ( __UpperCAmelCase ) -> str: if isinstance(__UpperCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class _lowerCamelCase : def _lowerCAmelCase ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : int ) -> int: """simple docstring""" pass def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" pass def _lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" pass def _lowerCAmelCase ( self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Any=None , **UpperCamelCase : Optional[Any] ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(UpperCamelCase ) lowerCAmelCase__ : List[Any] = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def _lowerCAmelCase ( self : int , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : Any=None , **UpperCamelCase : Union[str, Any] ) -> Dict: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase , text_model=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=None , **UpperCamelCase : Optional[Any] ) -> Any: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase ) 
lowerCAmelCase__ : Union[str, Any] = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _lowerCAmelCase ( self : int , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : Tuple=None , **UpperCamelCase : Optional[Any] ) -> Any: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase , text_model=UpperCamelCase ) lowerCAmelCase__ : List[Any] = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = model(input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase ) lowerCAmelCase__ : int = after_output[0].numpy() lowerCAmelCase__ : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase , 1E-5 ) def _lowerCAmelCase ( self : int , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : List[Any]=None , **UpperCamelCase : Optional[int] ) -> Dict: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : str = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : int = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase , text_model=UpperCamelCase ) lowerCAmelCase__ : Dict = model( input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase , output_attentions=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size ) lowerCAmelCase__ : Any = to_atuple(vision_model.config.patch_size ) lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase__ : Tuple = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase__ : List[str] = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : float ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : List[str] = np.abs((a - b) ).max() self.assertLessEqual(UpperCamelCase , UpperCamelCase , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def _lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**UpperCamelCase ) def _lowerCAmelCase ( self : 
Dict ) -> int: """simple docstring""" lowerCAmelCase__ : Any = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**UpperCamelCase ) def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase ) def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() self.check_save_load(**UpperCamelCase ) def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**UpperCamelCase ) @slow def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_pretrained_model_and_inputs() lowerCAmelCase__ : Union[str, Any] = model_a(**UpperCamelCase ) lowerCAmelCase__ : Any = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(UpperCamelCase ) lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_pretrained(UpperCamelCase ) lowerCAmelCase__ : List[str] = model_a(**UpperCamelCase ) lowerCAmelCase__ : Dict = after_outputs[0].numpy() lowerCAmelCase__ : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase , 1E-5 ) @require_tf class _lowerCamelCase ( a_ , unittest.TestCase ): def _lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) lowerCAmelCase__ : Optional[Any] = 13 lowerCAmelCase__ : List[str] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : Dict = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _lowerCAmelCase ( self : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ) -> str: """simple docstring""" lowerCAmelCase__ : str = TFViTModel(UpperCamelCase , name="""vision_model""" ) lowerCAmelCase__ : Any = TFBertModel(UpperCamelCase , name="""text_model""" ) return vision_model, text_model def _lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" lowerCAmelCase__ : str = TFViTModelTester(self ) lowerCAmelCase__ : str = TFBertModelTester(self ) lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = vision_config_and_inputs ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : List[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _lowerCamelCase ( a_ , 
unittest.TestCase ): def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. lowerCAmelCase__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) lowerCAmelCase__ : str = 13 lowerCAmelCase__ : Optional[int] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : List[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str=None , **UpperCamelCase : Optional[Any] ) -> int: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.get_vision_text_model(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=UpperCamelCase , text_model=UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = model( input_ids=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase , output_attentions=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowerCAmelCase__ : Dict = to_atuple(vision_model.config.image_size ) lowerCAmelCase__ : Any = to_atuple(vision_model.config.patch_size ) lowerCAmelCase__ : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase__ : Optional[Any] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase__ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCAmelCase ( self : int , UpperCamelCase : Any , UpperCamelCase : str ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Any = TFDeiTModel(UpperCamelCase , name="""vision_model""" ) lowerCAmelCase__ : str = TFRobertaModel(UpperCamelCase , name="""text_model""" ) return vision_model, text_model def _lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[Any] = TFDeiTModelTester(self ) lowerCAmelCase__ : Union[str, Any] = TFRobertaModelTester(self ) lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = vision_config_and_inputs ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": 
pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _lowerCamelCase ( a_ , unittest.TestCase ): def _lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) lowerCAmelCase__ : Any = 13 lowerCAmelCase__ : List[str] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : str = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _lowerCAmelCase ( self : str , UpperCamelCase : str , UpperCamelCase : Optional[Any] ) -> Any: """simple docstring""" lowerCAmelCase__ : int = TFCLIPVisionModel(UpperCamelCase , name="""vision_model""" ) lowerCAmelCase__ : List[str] = TFBertModel(UpperCamelCase , name="""text_model""" ) return vision_model, text_model def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : str = TFCLIPVisionModelTester(self ) lowerCAmelCase__ : int = TFBertModelTester(self ) lowerCAmelCase__ : str = clip_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Optional[int] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ : Dict = vision_config_and_inputs ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class _lowerCamelCase ( unittest.TestCase ): @slow def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=UpperCamelCase ) lowerCAmelCase__ : Any = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowerCAmelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCAmelCase__ : Tuple = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=UpperCamelCase , padding=UpperCamelCase , return_tensors="""np""" ) lowerCAmelCase__ : Tuple = model(**UpperCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowerCAmelCase__ : List[Any] = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , UpperCamelCase , atol=1E-3 ) )
212
0
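A quick round-trip check of the PV = nRT helpers above. This is a standalone sketch with illustrative values (1 mol at 300 K in 0.0224 m^3), not part of the original sample:

# Round-trip check of PV = nRT
n, T, V = 1.0, 300.0, 0.0224
R = 8.314_462  # J mol^-1 K^-1
P = n * T * R / V       # ~1.11e5 Pa
V_back = n * T * R / P  # should recover 0.0224 m^3
assert abs(V_back - V) < 1e-12
print(f"pressure = {P:.1f} Pa")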
def max_transposition(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError('only integers accepted as input')
    digits = str(abs(number))
    # One copy of the digit list per position; drop a different digit from each copy
    num_transpositions = [list(digits) for _ in range(len(digits))]
    for index in range(len(digits)):
        num_transpositions[index].pop(index)
    return max(int(''.join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__('doctest').testmod()
207
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS A__ : Tuple = logging.get_logger(__name__) A__ : int = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class _UpperCAmelCase ( A__ ): """simple docstring""" def __init__( self : Optional[int], lowerCamelCase : int=None, lowerCamelCase : int=None, *lowerCamelCase : List[Any], **lowerCamelCase : Any ): '''simple docstring''' super().__init__(*lowerCamelCase, **lowerCamelCase ) if config is None: assert isinstance(self.model, lowerCamelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) lowercase__ = self.model.config else: lowercase__ = config lowercase__ = data_args lowercase__ = self.config.tgt_vocab_size if isinstance(self.config, lowerCamelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: lowercase__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase__ = label_smoothed_nll_loss def lowercase__ ( self : List[Any], lowerCamelCase : int ): '''simple docstring''' if self.optimizer is None: lowercase__ = ['''bias''', '''LayerNorm.weight'''] lowercase__ = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowercase__ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase__ = Adafactor lowercase__ = {'''scale_parameter''': False, '''relative_step''': False} else: lowercase__ = AdamW lowercase__ = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowercase__ = self.args.learning_rate if self.sharded_ddp: lowercase__ = OSS( params=lowerCamelCase, optim=lowerCamelCase, **lowerCamelCase, ) else: lowercase__ = optimizer_cls(lowerCamelCase, **lowerCamelCase ) if self.lr_scheduler is None: lowercase__ = self._get_lr_scheduler(lowerCamelCase ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def lowercase__ ( self : List[str], lowerCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase__ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase__ = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps ) else: lowercase__ = schedule_func( self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=lowerCamelCase ) return scheduler def lowercase__ ( self : List[Any] ): '''simple docstring''' if isinstance(self.train_dataset, torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : str, lowerCamelCase : Union[str, Any] ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase__ = model(**lowerCamelCase, use_cache=lowerCamelCase )[0] lowercase__ = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) ) else: # compute usual loss via models lowercase__ , lowercase__ = model(**lowerCamelCase, labels=lowerCamelCase, use_cache=lowerCamelCase )[:2] else: # compute label smoothed loss lowercase__ = model(**lowerCamelCase, use_cache=lowerCamelCase )[0] lowercase__ = torch.nn.functional.log_softmax(lowerCamelCase, dim=-1 ) lowercase__ , lowercase__ = self.loss_fn(lowerCamelCase, lowerCamelCase, self.args.label_smoothing, ignore_index=self.config.pad_token_id ) return loss, logits def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any], 
lowerCamelCase : Tuple ): '''simple docstring''' lowercase__ = inputs.pop('''labels''' ) lowercase__ , lowercase__ = self._compute_loss(lowerCamelCase, lowerCamelCase, lowerCamelCase ) return loss def lowercase__ ( self : str, lowerCamelCase : nn.Module, lowerCamelCase : Dict[str, Union[torch.Tensor, Any]], lowerCamelCase : bool, lowerCamelCase : Optional[List[str]] = None, ): '''simple docstring''' lowercase__ = self._prepare_inputs(lowerCamelCase ) lowercase__ = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase__ = self.model.generate( inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], **lowerCamelCase, ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase__ = self._pad_tensors_to_max_len(lowerCamelCase, gen_kwargs['''max_length'''] ) lowercase__ = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data lowercase__ , lowercase__ = self._compute_loss(lowerCamelCase, lowerCamelCase, lowerCamelCase ) lowercase__ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase__ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase__ = self._pad_tensors_to_max_len(lowerCamelCase, gen_kwargs['''max_length'''] ) return (loss, logits, labels) def lowercase__ ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Any ): '''simple docstring''' # If PAD token is not defined at least EOS token has to be defined lowercase__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F""" padded to `max_length`={max_length}""" ) lowercase__ = pad_token_id * torch.ones( (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device ) lowercase__ = tensor return padded_tensor
207
1
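To make the drop-one-digit routine above concrete, here is a compact standalone equivalent with worked cases; the function name is illustrative, not from the sample:

def largest_after_dropping_one_digit(number: int) -> int:
    digits = str(abs(number))
    # Build every string with exactly one position removed, take the max as an int
    return max(int(digits[:i] + digits[i + 1:]) for i in range(len(digits)))

assert largest_after_dropping_one_digit(2736) == 736  # candidates: 736, 236, 276, 273
assert largest_after_dropping_one_digit(120) == 20    # candidates: 20, 10, 12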
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'''{group_key}.''', f'''{group_key}.group.''')
        if "res_path" in key:
            key = key.replace('''res_path.''', '''res_path.path.''')
        if key.endswith('''.w'''):
            key = rreplace(key, '''.w''', '''.weight''', 1)
        if key.endswith('''.b'''):
            key = rreplace(key, '''.b''', '''.bias''', 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
359
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        # c encodes the loop counters of the recursive formulation
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
65
0
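As a cross-check on the iterative Heap's algorithm above, the textbook recursive formulation below yields the same n! permutations; itertools.permutations is used only to verify the result, and the names are illustrative:

from itertools import permutations

def heaps_recursive(seq):
    res = []

    def generate(k, a):
        if k == 1:
            res.append(tuple(a))
            return
        for i in range(k - 1):
            generate(k - 1, a)
            # Heap's rule: the swap position depends on the parity of k
            if k % 2 == 0:
                a[i], a[k - 1] = a[k - 1], a[i]
            else:
                a[0], a[k - 1] = a[k - 1], a[0]
        generate(k - 1, a)

    generate(len(seq), list(seq))
    return res

out = heaps_recursive([1, 2, 3])
assert len(out) == 6 and set(out) == set(permutations([1, 2, 3]))
assert len(heaps_recursive([0, 1, 2, 3])) == 24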
'''simple docstring'''
import os
import string
import sys

ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    '''tab''': ord('''\t'''),
    '''newline''': ord('''\r'''),
    '''esc''': 2_7,
    '''up''': 6_5 + ARROW_KEY_FLAG,
    '''down''': 6_6 + ARROW_KEY_FLAG,
    '''right''': 6_7 + ARROW_KEY_FLAG,
    '''left''': 6_8 + ARROW_KEY_FLAG,
    '''mod_int''': 9_1,
    '''undefined''': sys.maxsize,
    '''interrupt''': 3,
    '''insert''': 5_0,
    '''delete''': 5_1,
    '''pg_up''': 5_3,
    '''pg_down''': 5_4,
}

KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
        B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
        B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
        B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
        B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
        B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
        B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
        B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
    }

for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars() -> str:
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt

        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["""esc"""])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character() -> str:
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
272
'''simple docstring'''
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
    '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}


class ErnieMConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''ernie_m'''
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1E-0_5,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
272
1
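The ErnieM config above leans on an attribute_map so legacy names resolve to canonical ones (e.g. dropout reads classifier_dropout). Below is a stripped-down sketch of that aliasing mechanism; it is an illustration, not the transformers implementation:

class AliasedConfig:
    # Maps legacy attribute names to their canonical storage names
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, classifier_dropout=0.1, num_labels=2):
        self.classifier_dropout = classifier_dropout
        self.num_labels = num_labels

    def __getattr__(self, name):
        # Only called when normal lookup fails; redirect known aliases
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # Writes through an alias land on the canonical attribute
        name = type(self).attribute_map.get(name, name)
        super().__setattr__(name, value)

cfg = AliasedConfig()
cfg.dropout = 0.3
assert cfg.classifier_dropout == 0.3 and cfg.num_classes == 2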
"""simple docstring""" from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a__ : Union[str, Any] = datasets.load_iris() a__ : Any = np.array(data['''data''']) a__ : Dict = np.array(data['''target''']) a__ : Optional[int] = data['''target_names'''] a__ , a__ , a__ , a__ : Dict = train_test_split(X, y) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return np.linalg.norm(np.array(lowerCAmelCase_ ) - np.array(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=5 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = zip(lowerCAmelCase_ , lowerCAmelCase_ ) # List of distances of all points from the point to be classified __SCREAMING_SNAKE_CASE = [] for data_point in data: __SCREAMING_SNAKE_CASE = euclidean_distance(data_point[0] , lowerCAmelCase_ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. __SCREAMING_SNAKE_CASE = [i[1] for i in sorted(lowerCAmelCase_ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
195
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple=1_3 , UpperCAmelCase__ : Optional[int]=3_0 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[str]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Any=1_0 , UpperCAmelCase__ : str=0.02 , ) -> Tuple: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 __SCREAMING_SNAKE_CASE = num_patches + 1 def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, pixel_values def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = FlaxViTModel(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) __SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) -> Tuple: __SCREAMING_SNAKE_CASE = self.type_sequence_label_size __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCAmelCase_ ( self : int ) -> None: __SCREAMING_SNAKE_CASE = FlaxViTModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Tuple ) -> List[str]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> List[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Any ): return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__ ) with self.subTest("JIT Enabled" ): __SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("google/vit-base-patch16-224" ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(UpperCAmelCase__ )
195
1
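The Flax ViT test above asserts that jitted and eager execution paths agree. The same pattern can be reproduced standalone; this sketch assumes jax is installed, and the forward function is a simple stand-in, not a ViT:

import jax
import jax.numpy as jnp

def forward(x):
    # Stand-in for a model forward pass
    return jnp.tanh(x) * 2.0 + 1.0

x = jnp.linspace(-1.0, 1.0, 8)
forward_jitted = jax.jit(forward)
out_compiled = forward_jitted(x)       # compiled path
with jax.disable_jit():
    out_eager = forward_jitted(x)      # runs uncompiled inside this context
assert jnp.allclose(out_compiled, out_eager)  # both paths must agree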