code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" def _A ( lowercase , lowercase ): """simple docstring""" return number | (1 << position) def _A ( lowercase , lowercase ): """simple docstring""" return number & ~(1 << position) def _A ( lowercase , lowercase ): """simple docstring""" return number ^ (1 << position) def _A ( lowercase , lowercase ): """simple docstring""" return ((number >> position) & 1) == 1 def _A ( lowercase , lowercase ): """simple docstring""" return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
81
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A__ = logging.get_logger(__name__) A__ = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCAmelCase ( lowerCamelCase__ ): __lowerCamelCase = '''unispeech''' def __init__( self , _snake_case=32 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=1e-5 , _snake_case="group" , _snake_case="gelu" , _snake_case=(512, 512, 512, 512, 512, 512, 512) , _snake_case=(5, 2, 2, 2, 2, 2, 2) , _snake_case=(10, 3, 3, 3, 3, 2, 2) , _snake_case=False , _snake_case=128 , _snake_case=16 , _snake_case=False , _snake_case=True , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=320 , _snake_case=2 , _snake_case=0.1 , _snake_case=100 , _snake_case=256 , _snake_case=256 , _snake_case=0.1 , _snake_case="mean" , _snake_case=False , _snake_case=False , _snake_case=256 , _snake_case=80 , _snake_case=0 , _snake_case=1 , _snake_case=2 , _snake_case=0.5 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case ) _lowerCAmelCase = hidden_size _lowerCAmelCase = feat_extract_norm _lowerCAmelCase = feat_extract_activation _lowerCAmelCase = list(_snake_case ) _lowerCAmelCase = list(_snake_case ) _lowerCAmelCase = list(_snake_case ) _lowerCAmelCase = conv_bias _lowerCAmelCase = num_conv_pos_embeddings _lowerCAmelCase = num_conv_pos_embedding_groups _lowerCAmelCase = len(self.conv_dim ) _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = intermediate_size 
_lowerCAmelCase = hidden_act _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = feat_proj_dropout _lowerCAmelCase = final_dropout _lowerCAmelCase = layerdrop _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = num_ctc_classes _lowerCAmelCase = vocab_size _lowerCAmelCase = do_stable_layer_norm _lowerCAmelCase = use_weighted_layer_sum _lowerCAmelCase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase = apply_spec_augment _lowerCAmelCase = mask_time_prob _lowerCAmelCase = mask_time_length _lowerCAmelCase = mask_time_min_masks _lowerCAmelCase = mask_feature_prob _lowerCAmelCase = mask_feature_length _lowerCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase = num_codevectors_per_group _lowerCAmelCase = num_codevector_groups _lowerCAmelCase = contrastive_logits_temperature _lowerCAmelCase = feat_quantizer_dropout _lowerCAmelCase = num_negatives _lowerCAmelCase = codevector_dim _lowerCAmelCase = proj_codevector_dim _lowerCAmelCase = diversity_loss_weight # ctc loss _lowerCAmelCase = ctc_loss_reduction _lowerCAmelCase = ctc_zero_infinity # pretraining loss _lowerCAmelCase = replace_prob @property def snake_case ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
82
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) 
snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , 
snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if 
output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output 
return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowercase__ ( metaclass=lowercase ): lowercase__ = ["""note_seq"""] def __init__( self : Tuple ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Tuple ): '''simple docstring''' requires_backends(self ,['note_seq'] ) @classmethod def UpperCamelCase_ ( cls : List[str] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : int ): '''simple docstring''' requires_backends(cls ,['note_seq'] ) @classmethod def UpperCamelCase_ ( cls : str ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Any ): '''simple docstring''' requires_backends(cls ,['note_seq'] )
83
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" import baseaa def _snake_case ( lowercase__ : str ) -> bytes: '''simple docstring''' return baseaa.aaaencode(string.encode("""utf-8""" ) ) def _snake_case ( lowercase__ : bytes ) -> str: '''simple docstring''' return baseaa.aaadecode(lowercase__ ).decode("""utf-8""" ) if __name__ == "__main__": import doctest doctest.testmod()
84
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = {} snake_case_ = tokenizer(example['content'] , truncation=UpperCamelCase__ )['input_ids'] snake_case_ = len(example['content'] ) / len(output['input_ids'] ) return output _UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) _UpperCAmelCase : List[Any] = parser.parse_args() if args.num_workers is None: _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count() _UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir) _UpperCAmelCase : Optional[int] = time.time() _UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Dict = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
0
'''simple docstring''' from __future__ import annotations import requests def UpperCamelCase_( snake_case : str ): '''simple docstring''' snake_case_ = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(snake_case ).json() def UpperCamelCase_( snake_case : int = 1_0 ): '''simple docstring''' snake_case_ = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" snake_case_ = requests.get(snake_case ).json()[:max_stories] return [get_hackernews_story(snake_case ) for story_id in story_ids] def UpperCamelCase_( snake_case : int = 1_0 ): '''simple docstring''' snake_case_ = hackernews_top_stories(snake_case ) return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
85
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_longformer""": [ """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongformerConfig""", """LongformerOnnxConfig""", ], """tokenization_longformer""": ["""LongformerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""LongformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongformerForMaskedLM""", """LongformerForMultipleChoice""", """LongformerForQuestionAnswering""", """LongformerForSequenceClassification""", """LongformerForTokenClassification""", """LongformerModel""", """LongformerPreTrainedModel""", """LongformerSelfAttention""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLongformerForMaskedLM""", """TFLongformerForMultipleChoice""", """TFLongformerForQuestionAnswering""", """TFLongformerForSequenceClassification""", """TFLongformerForTokenClassification""", """TFLongformerModel""", """TFLongformerPreTrainedModel""", """TFLongformerSelfAttention""", ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
86
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    """Round-trip and consistency tests for ChineseCLIPProcessor.

    NOTE(review): the obfuscated original named every test method ``a`` (so
    only the last one survived class creation) and referenced the undefined
    name ``snake_case`` throughout; restored distinct method and local names.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # Tiny WordPiece vocab that covers the Chinese/Latin sample sentence used below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last for PIL)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
285
0
"""Fast (Rust-backed) tokenizer for the CodeGen model.

NOTE(review): the obfuscated original raised NameErrors on ``model_id``,
``pre_tok_state``, ``terminals`` and the module-level constants (each value was
bound to a throwaway name and then used under its real name); restored the
canonical bindings.
"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Byte-level BPE tokenizer for CodeGen, backed by the `tokenizers` library.

    Adds a `truncate_before_pattern` option to :meth:`decode` so generated code
    can be cut at the first occurrence of any of a set of regex "terminals".
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Rebuild the backend pre-tokenizer if its add_prefix_space disagrees
        # with the requested setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Decode ids to text, optionally truncating at the first regex in
        *truncate_before_pattern* (multi-line matching)."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut *completion* at the second top-level `print`/`def` and at the
        earliest match of any pattern in *truncate_before_pattern*."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
87
from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase ( lowercase_ ): @staticmethod @abstractmethod def a ( snake_case ): raise NotImplementedError() @abstractmethod def a ( self ): raise NotImplementedError()
285
0
def a__ ( A_ ): '''simple docstring''' if len(A_ ) <= 1: return [tuple(A_ )] __magic_name__ = [] def generate(A_, A_ ): __magic_name__ = [0] * n res.append(tuple(A_ ) ) __magic_name__ = 0 while i < n: if c[i] < i: if i % 2 == 0: __magic_name__ , __magic_name__ = arr[i], arr[0] else: __magic_name__ , __magic_name__ = arr[i], arr[c[i]] res.append(tuple(A_ ) ) c[i] += 1 __magic_name__ = 0 else: __magic_name__ = 0 i += 1 generate(len(A_ ), A_ ) return res if __name__ == "__main__": __lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip() __lowerCAmelCase : int = [int(item) for item in user_input.split(',')] print(heaps(arr))
88
"""Convert timm ResNet checkpoints into `transformers` ResNetForImageClassification.

NOTE(review): the obfuscated original had duplicate parameter names in its
``def`` headers (a SyntaxError), gave both converter functions the same name,
and called ``convert_weight_and_push`` / ``convert_weights_and_push`` /
``ImageNetPreTrainedConfig`` which were never bound; restored canonical names.
"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf modules hit during one forward pass of ``module``."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A module counts as a leaf when it has no submodules; Conv2d and
        # BatchNorm2d are always recorded because they carry the weights.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach all hooks once the trace is complete.
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # Keep only the traced modules that actually own learnable state.
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` to ``dest`` by zipping their traced leaves."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Run ``x`` through both models and copy state dicts pairwise.

        Raises:
            Exception: when the two traces contain a different number of
                parameterized operations.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert one timm checkpoint ``name`` and optionally push it to the hub."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or all) supported ResNet sizes; returns (config, expected_shape)."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # All configs share the ImageNet label maps; only depth/width/layer type vary.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class __magic_name__ ( metaclass=_UpperCamelCase ): lowerCAmelCase : str = ['note_seq'] def __init__( self : Tuple ,*_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : str ): requires_backends(self ,['note_seq'] ) @classmethod def __lowercase ( cls : List[Any] ,*_UpperCAmelCase : str ,**_UpperCAmelCase : Optional[Any] ): requires_backends(cls ,['note_seq'] ) @classmethod def __lowercase ( cls : Union[str, Any] ,*_UpperCAmelCase : Dict ,**_UpperCAmelCase : Any ): requires_backends(cls ,['note_seq'] )
89
"""Benchmark of `datasets` iteration speed under several access patterns.

NOTE(review): the obfuscated original declared every reader with duplicate
parameter names (a SyntaxError), gave all five functions the same name, and
referenced ``read``/``read_batch``/... which were never bound; restored the
canonical function names the benchmark tables rely on.
"""
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Read `length` examples one at a time."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Read the dataset in contiguous slices of `batch_size`."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, type, length):
    """Read `length` single examples with an output format applied."""
    # `type` shadows the builtin, but the name must match the kwargs keys used below.
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    """Read `length` examples in batches with an output format applied."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Run all read patterns before and after shuffling; dump timings as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
285
0
"""Tests for the DeBERTa model and its task heads.

NOTE(review): the obfuscated original named all three classes identically,
named every tester method ``lowercase_`` (each shadowed the previous), and
referenced the undefined ``DebertaModelTester``; restored the canonical
structure and names.
"""
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaModelTester(object):
    """Builds tiny DeBERTa configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipelines need a slightly larger vocab than the tiny default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
90
def mf_knapsack(i, wt, val, j):
    """
    Memoization-based 0/1 knapsack.

    Returns the maximum value attainable using the first ``i`` items with
    remaining capacity ``j``.  Results are cached in the module-level table
    ``f`` (created in the ``__main__`` block), where ``f[i][j] < 0`` marks an
    uncomputed entry.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # item i does not fit: it can only be skipped
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            # best of skipping item i vs. taking it
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    """
    Bottom-up 0/1 knapsack.

    :param w:   total capacity
    :param wt:  item weights (item ``i`` is ``wt[i - 1]``)
    :param val: item values
    :param n:   number of items
    :return: ``(optimal_value, dp)`` where ``dp[i][cap]`` is the best value
             using the first ``i`` items with capacity ``cap``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """
    Solve the knapsack problem and also reconstruct one optimal subset.

    :raises ValueError: if ``wt``/``val`` are not lists/tuples or differ in length
    :raises TypeError:  if any weight is not an integer
    :return: ``(optimal_value, optimal_subset)`` with 1-based item indices.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples'
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'''But got {num_items} weights and {len(val)} values'''
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f'''type {type(wt[i])} at index {i}'''
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """
    Walk the DP table backwards from ``dp[i][j]``, adding every item whose
    inclusion changed the optimum to ``optimal_set`` (1-based indices).
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # item i was not needed for the optimum
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # item i is part of the optimal subset
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
"""simple docstring""" import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = checkpoint SCREAMING_SNAKE_CASE_ : str = {} SCREAMING_SNAKE_CASE_ : Optional[Any] = vae_state_dict['''encoder.conv_in.weight'''] SCREAMING_SNAKE_CASE_ : Dict = vae_state_dict['''encoder.conv_in.bias'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = vae_state_dict['''encoder.conv_out.weight'''] SCREAMING_SNAKE_CASE_ : List[Any] = vae_state_dict['''encoder.conv_out.bias'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = vae_state_dict['''encoder.norm_out.weight'''] SCREAMING_SNAKE_CASE_ : str = vae_state_dict['''encoder.norm_out.bias'''] SCREAMING_SNAKE_CASE_ : int = vae_state_dict['''decoder.conv_in.weight'''] SCREAMING_SNAKE_CASE_ : Any = vae_state_dict['''decoder.conv_in.bias'''] SCREAMING_SNAKE_CASE_ : Tuple = vae_state_dict['''decoder.conv_out.weight'''] SCREAMING_SNAKE_CASE_ : Any = vae_state_dict['''decoder.conv_out.bias'''] SCREAMING_SNAKE_CASE_ : List[Any] = vae_state_dict['''decoder.norm_out.weight'''] SCREAMING_SNAKE_CASE_ : int = vae_state_dict['''decoder.norm_out.bias'''] SCREAMING_SNAKE_CASE_ : str = vae_state_dict['''quant_conv.weight'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = vae_state_dict['''quant_conv.bias'''] SCREAMING_SNAKE_CASE_ : int = vae_state_dict['''post_quant_conv.weight'''] SCREAMING_SNAKE_CASE_ : int = vae_state_dict['''post_quant_conv.bias'''] # Retrieves the keys for the encoder down blocks only SCREAMING_SNAKE_CASE_ : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} ) SCREAMING_SNAKE_CASE_ : Tuple = { layer_id: [key for key in vae_state_dict if 
f'down.{layer_id}' in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only SCREAMING_SNAKE_CASE_ : Optional[Any] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} ) SCREAMING_SNAKE_CASE_ : Tuple = { layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(__a ) } for i in range(__a ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key] if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: SCREAMING_SNAKE_CASE_ : List[str] = vae_state_dict.pop( f'encoder.down.{i}.downsample.conv.weight' ) SCREAMING_SNAKE_CASE_ : Optional[int] = vae_state_dict.pop( f'encoder.down.{i}.downsample.conv.bias' ) SCREAMING_SNAKE_CASE_ : List[Any] = renew_vae_resnet_paths(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = {'''old''': f'down.{i}.block', '''new''': f'down_blocks.{i}.resnets'} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) SCREAMING_SNAKE_CASE_ : str = [key for key in vae_state_dict if '''encoder.mid.block''' in key] SCREAMING_SNAKE_CASE_ : Any = 2 for i in range(1 , num_mid_res_blocks + 1 ): SCREAMING_SNAKE_CASE_ : Optional[Any] = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key] SCREAMING_SNAKE_CASE_ : List[str] = renew_vae_resnet_paths(__a ) SCREAMING_SNAKE_CASE_ : Dict = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) SCREAMING_SNAKE_CASE_ : Dict = [key for key in vae_state_dict if '''encoder.mid.attn''' in key] SCREAMING_SNAKE_CASE_ : Union[str, Any] = renew_vae_attention_paths(__a ) SCREAMING_SNAKE_CASE_ : Tuple = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) 
for i in range(__a ): SCREAMING_SNAKE_CASE_ : List[str] = num_up_blocks - 1 - i SCREAMING_SNAKE_CASE_ : Tuple = [ key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key ] if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: SCREAMING_SNAKE_CASE_ : Optional[Any] = vae_state_dict[ f'decoder.up.{block_id}.upsample.conv.weight' ] SCREAMING_SNAKE_CASE_ : str = vae_state_dict[ f'decoder.up.{block_id}.upsample.conv.bias' ] SCREAMING_SNAKE_CASE_ : Optional[Any] = renew_vae_resnet_paths(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''old''': f'up.{block_id}.block', '''new''': f'up_blocks.{i}.resnets'} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) SCREAMING_SNAKE_CASE_ : str = [key for key in vae_state_dict if '''decoder.mid.block''' in key] SCREAMING_SNAKE_CASE_ : List[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): SCREAMING_SNAKE_CASE_ : Optional[Any] = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key] SCREAMING_SNAKE_CASE_ : int = renew_vae_resnet_paths(__a ) SCREAMING_SNAKE_CASE_ : Tuple = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) SCREAMING_SNAKE_CASE_ : List[Any] = [key for key in vae_state_dict if '''decoder.mid.attn''' in key] SCREAMING_SNAKE_CASE_ : Dict = renew_vae_attention_paths(__a ) SCREAMING_SNAKE_CASE_ : Tuple = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def _A (__a , __a , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = requests.get( ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' ) SCREAMING_SNAKE_CASE_ : Dict = io.BytesIO(r.content ) SCREAMING_SNAKE_CASE_ : 
Any = OmegaConf.load(__a ) SCREAMING_SNAKE_CASE_ : int = 5_12 SCREAMING_SNAKE_CASE_ : str = '''cuda''' if torch.cuda.is_available() else '''cpu''' if checkpoint_path.endswith('''safetensors''' ): from safetensors import safe_open SCREAMING_SNAKE_CASE_ : Dict = {} with safe_open(__a , framework='''pt''' , device='''cpu''' ) as f: for key in f.keys(): SCREAMING_SNAKE_CASE_ : Any = f.get_tensor(__a ) else: SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(__a , map_location=__a )['''state_dict'''] # Convert the VAE model. SCREAMING_SNAKE_CASE_ : Any = create_vae_diffusers_config(__a , image_size=__a ) SCREAMING_SNAKE_CASE_ : int = custom_convert_ldm_vae_checkpoint(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[int] = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": UpperCAmelCase_ : int = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") UpperCAmelCase_ : Any = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
91
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging

# The class body below calls ``logger.warning``, so the module logger must be
# bound under this name.
logger = logging.get_logger(__name__)


class lowercase ( SequenceFeatureExtractor ):
    """
    CLAP-style audio feature extractor: turns raw mono waveforms into (optionally
    fused) log-mel spectrograms plus a per-sample "is_longer" flag.
    """

    # Input names produced by this extractor (consumed by the base class).
    model_input_names = ['input_features', 'is_longer']

    def __init__(
        self,
        feature_size=64,
        sampling_rate=4_8000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 1_4000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of positive-frequency bins of an N-point FFT: N // 2 + 1.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two mel banks: an HTK-style one and a Slaney-normalized one; which is
        # used depends on the truncation strategy (see _get_input_mel).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale='htk',
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        )

    def to_dict(self):
        """Serialize the configuration, dropping the (large, derivable) mel banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        """Compute a dB-scaled log-mel spectrogram, returned as (frames, mel bins)."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, 'hann'),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel='dB',
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel "fusion" mel: a shrunk global view plus three random chunks."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        # Shrink the full spectrogram to chunk size via bilinear interpolation.
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='bilinear', align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        """Truncate or pad ``waveform`` to ``max_length`` samples and return
        ``(mel_features, longer)`` where ``longer`` flags a truncated input."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller
                    # than max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'''data_truncating {truncation} not implemented''')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation=None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        # NOTE(review): dtype choices follow the usual transformers feature-extractor
        # pattern (convert to float32, downcast accidental float64) -- confirm upstream.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float32) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
285
0
from __future__ import annotations import numpy as np def _a ( SCREAMING_SNAKE_CASE_ : np.ndarray ): __lowerCAmelCase , __lowerCAmelCase = np.shape(SCREAMING_SNAKE_CASE_ ) if rows != columns: __lowerCAmelCase = ( "'table' has to be of square shaped array but got a " F"""{rows}x{columns} array:\n{table}""" ) raise ValueError(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = np.zeros((rows, columns) ) __lowerCAmelCase = np.zeros((rows, columns) ) for i in range(SCREAMING_SNAKE_CASE_ ): for j in range(SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) ) if upper[j][j] == 0: raise ArithmeticError("No LU decomposition exists" ) __lowerCAmelCase = (table[i][j] - total) / upper[j][j] __lowerCAmelCase = 1 for j in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) ) __lowerCAmelCase = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
92
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Structural equality of two TensorProtos, ignoring their names."""
    name_a = a.name
    name_b = b.name
    # Blank the names so the protobuf comparison only considers the data.
    a.name = ''
    b.name = ''
    res = a == b
    # Restore the original names before returning.
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of ``node_proto`` equal to ``name`` to ``new_name``,
    recursing into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # repeated protobuf fields do not support item assignment: insert+pop
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply _node_replace_input_with to every node of ``graph_proto``."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Remove duplicate initializers (pairs ``(dup_index, ref_index)``) from
    ``model_without_ext`` and rewire all of their uses to the reference."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Load an ONNX model, merge byte-identical initializers, and save the result
    next to the original with an ``optimized_`` filename prefix.

    :return: path of the optimized model file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    # Pairwise scan; each initializer is merged into the first equal one found.
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # NOTE(review): data_type codes per onnx.TensorProto
                # (1=FLOAT, 6=INT32, 7=INT64, 11=DOUBLE) -- element sizes below.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
285
0
'''Benchmark iteration speed over a datasets.Dataset (plain, batched, and formatted reads).'''
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 5_0_0_0_0
SMALL_TEST = 5_0_0_0

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length: int):
    """Time sequential single-example access of the first ``length`` rows."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    """Time batched access over the whole dataset with the given batch size."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    """Time single-example access under an output format (numpy/pandas/torch/...)."""
    # ``type`` shadows the builtin, but the name must match the kwargs keys
    # used in the benchmark tables below.
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    """Time batched access under an output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, run every read benchmark before and after
    shuffling, and dump the timings as JSON to RESULTS_FILE_PATH."""
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
    ]
    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={'list': (100,)},
        )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # key: function name plus its kwargs values, e.g. "read_batch 50000 10"
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
93
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Element-wise logistic sigmoid: 1 / (1 + e^-x).
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    Element-wise SiLU (sigmoid linear unit, a.k.a. swish): x * sigmoid(x).
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
285
0
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging snake_case : Any = logging.get_logger(__name__) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['input_values', 'attention_mask'] def __init__( self , _lowerCamelCase = 1 , _lowerCamelCase = 1_6000 , _lowerCamelCase = 0.0 , _lowerCamelCase = False , _lowerCamelCase = 80 , _lowerCamelCase = 16 , _lowerCamelCase = 64 , _lowerCamelCase = "hann_window" , _lowerCamelCase = 1.0 , _lowerCamelCase = 80 , _lowerCamelCase = 7600 , _lowerCamelCase = 1e-10 , _lowerCamelCase = 2 , _lowerCamelCase = True , **_lowerCamelCase , ): super().__init__(feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , **_lowerCamelCase ) a :Union[str, Any] = do_normalize a :List[Any] = return_attention_mask a :List[str] = num_mel_bins a :List[str] = hop_length a :List[Any] = win_length a :List[Any] = win_function a :List[str] = frame_signal_scale a :List[str] = fmin a :Tuple = fmax a :List[Any] = mel_floor a :Union[str, Any] = reduction_factor a :Union[str, Any] = win_length * sampling_rate // 1000 a :Dict = hop_length * sampling_rate // 1000 a :Any = optimal_fft_length(self.sample_size ) a :List[Any] = (self.n_fft // 2) + 1 a :Any = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCamelCase ) a :str = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ) if frame_signal_scale != 1.0: warnings.warn( '''The argument `frame_signal_scale` is deprecated and will be removed in version 
4.30.0 of Transformers''' , _lowerCamelCase , ) if reduction_factor != 2.0: warnings.warn( '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _lowerCamelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def SCREAMING_SNAKE_CASE__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.0 ): if attention_mask is not None: a :List[Any] = np.array(_lowerCamelCase , np.intaa ) a :List[str] = [] for vector, length in zip(_lowerCamelCase , attention_mask.sum(-1 ) ): a :Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: a :Union[str, Any] = padding_value normed_input_values.append(_lowerCamelCase ) else: a :List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , ): a :Union[str, Any] = spectrogram( _lowerCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , ) return log_mel_spec.T def __call__( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): if audio is None and audio_target is None: raise ValueError('''You must provide either `audio` or `audio_target` values.''' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. 
Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if audio is not None: a :Optional[Any] = self._process_audio( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , ) else: a :int = None if audio_target is not None: a :Optional[int] = self._process_audio( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , ) if inputs is None: return inputs_target else: a :Optional[Any] = inputs_target['''input_values'''] a :Union[str, Any] = inputs_target.get('''attention_mask''' ) if decoder_attention_mask is not None: a :str = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): a :Optional[int] = isinstance(_lowerCamelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) a :List[Any] = is_batched_numpy or ( isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a :str = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ): a :Union[str, Any] = np.asarray(_lowerCamelCase , dtype=np.floataa ) elif isinstance(_lowerCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): a :List[Any] 
= speech.astype(np.floataa ) # always return batch if not is_batched: a :List[Any] = [speech] # needed to make pad() work on spectrogram inputs a :Optional[int] = self.feature_size # convert into correct format for padding if is_target: a :List[Any] = [self._extract_mel_features(_lowerCamelCase ) for waveform in speech] a :List[Any] = BatchFeature({'''input_values''': features} ) a :List[Any] = self.num_mel_bins else: a :List[str] = BatchFeature({'''input_values''': speech} ) a :Optional[int] = self.pad( _lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , ) a :List[str] = feature_size_hack # convert input values to correct format a :Tuple = padded_inputs['''input_values'''] if not isinstance(input_values[0] , np.ndarray ): a :int = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(_lowerCamelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): a :Union[str, Any] = [array.astype(np.floataa ) for array in input_values] elif isinstance(_lowerCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): a :Optional[int] = input_values.astype(np.floataa ) # convert attention_mask to correct format a :Any = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: a :Union[str, Any] = [np.asarray(_lowerCamelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: a :Union[str, Any] = ( attention_mask if self._get_padding_strategies(_lowerCamelCase , max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) a :List[str] = self.zero_mean_unit_var_norm( padded_inputs['''input_values'''] , attention_mask=_lowerCamelCase , padding_value=self.padding_value ) if return_tensors is not None: a :Any = 
padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = super().to_dict() # Don't serialize these as they are derived from the other properties. a :Tuple = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs'''] for name in names: if name in output: del output[name] return output
94
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter UpperCAmelCase : List[Any] = logging.get_logger(__name__) UpperCAmelCase : Dict[Optional[str], Type[Formatter]] = {} UpperCAmelCase : Dict[Optional[str], str] = {} UpperCAmelCase : Dict[Optional[str], Exception] = {} def _A ( SCREAMING_SNAKE_CASE : type , SCREAMING_SNAKE_CASE : Optional[str] , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ): """simple docstring""" a__ : Optional[int] =aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) a__ : Tuple =formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) a__ : int =format_type def _A ( SCREAMING_SNAKE_CASE : Exception , SCREAMING_SNAKE_CASE : Optional[str] , SCREAMING_SNAKE_CASE : Optional[List[str]] = None ): """simple docstring""" a__ : Tuple =aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): a__ : str =unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["""python"""]) _register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""]) _register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""]) _register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""]) _register_formatter(CustomFormatter, """custom""") if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 
"""torch""", aliases=["""pt""", """pytorch"""]) else: UpperCAmelCase : int = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""") _register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""]) else: UpperCAmelCase : Dict = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""") _register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, """jax""", aliases=[]) else: UpperCAmelCase : List[Any] = ValueError("""JAX needs to be installed to be able to return JAX arrays.""") _register_unavailable_formatter(_jax_error, """jax""", aliases=[]) def _A ( SCREAMING_SNAKE_CASE : Optional[str] ): """simple docstring""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _A ( SCREAMING_SNAKE_CASE : Optional[str] , **SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" a__ : Optional[Any] =get_format_type_from_alias(SCREAMING_SNAKE_CASE ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**SCREAMING_SNAKE_CASE ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
95
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" lowercase__ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)] def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 100000] number //= 100000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution lowercase__ = [None] * 1000_0000 lowercase__ = True lowercase__ = False def _snake_case ( lowercase__ ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _lowerCamelCase : Any = chain(next_number(lowercase__ ) ) _lowerCamelCase : Dict = number_chain while number < 10000000: _lowerCamelCase : Union[str, Any] = number_chain number *= 10 return number_chain def _snake_case ( lowercase__ = 10000000 ): for i in range(1 , lowercase__ ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod() print(F"{solution() = }")
96
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


# jax / flax are optional backends; import them only when present.
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class lowercase ( unittest.TestCase ):
    """Slow integration test for the Flax Stable Diffusion 2 inpainting pipeline.

    NOTE(review): identifiers look machine-mangled — both methods are named
    ``a`` (the second shadows the first) and every local is bound as
    ``snake_case_`` yet later read under other names (``pipeline``,
    ``num_samples``, ``output``, ...), so this test cannot run as written.
    Documented as-is; not fixed here.
    """

    def a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def a ( self ):
        # Fetch the reference init/mask images hosted for internal testing.
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting'
        snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case )
        snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
        snake_case_ = jax.random.PRNGKey(0 )
        snake_case_ = 50
        snake_case_ = jax.device_count()
        # One copy of the prompt/images per local device, for pmap execution.
        snake_case_ = num_samples * [prompt]
        snake_case_ = num_samples * [init_image]
        snake_case_ = num_samples * [mask_image]
        snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case )
        # shard inputs and rng
        snake_case_ = replicate(snake_case )
        snake_case_ = jax.random.split(snake_case , jax.device_count() )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )
        snake_case_ = pipeline(
            snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case )
        snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 )
        # Compare a fixed 3x3 patch of the last channel against reference values.
        snake_case_ = images[0, 253:256, 253:256, -1]
        snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case_ = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
"""Compute the mode(s) of a list."""
from typing import Any


def a(input_list: list) -> list[Any]:
    """Return the most frequent value(s) of *input_list*, sorted.

    An empty input yields an empty list; ties return every tied value.
    """
    if not input_list:
        return []
    # Frequency of each element, positionally aligned with input_list.
    input_count = [input_list.count(value) for value in input_list]
    y = max(input_count)  # highest frequency observed
    # Keep each element whose count equals the maximum, deduplicated via a set.
    return sorted({input_list[i] for i, value in enumerate(input_count) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
97
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , 
splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 
'README.md' ) )
285
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm lowerCAmelCase__ : str = logging.get_logger(__name__) @dataclass class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : Union[str, Any] ,**lowerCamelCase__ : Optional[Any] ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: UpperCAmelCase__ = deprecated_arg[3:] setattr(self ,lowerCamelCase__ ,not kwargs.pop(lowerCamelCase__ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) UpperCAmelCase__ = kwargs.pop('torchscript' ,self.torchscript ) UpperCAmelCase__ = kwargs.pop('torch_xla_tpu_print_metrics' ,self.torch_xla_tpu_print_metrics ) UpperCAmelCase__ = kwargs.pop('fp16_opt_level' ,self.fpaa_opt_level ) super().__init__(**lowerCamelCase__ ) snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Trace the models using torchscript"} ) snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) snake_case__ = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def __lowerCAmelCase ( self : Optional[int] ): requires_backends(self ,['torch'] ) logger.info('PyTorch: setting up devices' ) if not self.cuda: UpperCAmelCase__ = torch.device('cpu' ) UpperCAmelCase__ = 0 elif is_torch_tpu_available(): UpperCAmelCase__ = xm.xla_device() UpperCAmelCase__ = 0 else: UpperCAmelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) UpperCAmelCase__ = torch.cuda.device_count() return device, n_gpu @property def __lowerCAmelCase ( self : Tuple ): return is_torch_tpu_available() and self.tpu @property def __lowerCAmelCase ( self : Any ): requires_backends(self ,['torch'] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def __lowerCAmelCase ( self : Dict ): requires_backends(self ,['torch'] ) return self._setup_devices[0] @property def __lowerCAmelCase ( self : int ): requires_backends(self ,['torch'] ) return self._setup_devices[1] @property def __lowerCAmelCase ( self : List[str] ): return self.n_gpu > 0
98
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a 
simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. 
This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
0
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging lowercase : Tuple = logging.get_logger(__name__) class A__ : """simple docstring""" __A : str __A : str = None @staticmethod def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' raise NotImplementedError def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> Optional[int]: '''simple docstring''' raise NotImplementedError def __lowercase ( self , lowercase) -> str: '''simple docstring''' raise NotImplementedError def __lowercase ( self) -> List[str]: '''simple docstring''' if not self.is_available(): raise RuntimeError( F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.') @classmethod def __lowercase ( cls) -> int: '''simple docstring''' return F'`pip install {cls.pip_package or cls.name}`' class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Dict = '''optuna''' @staticmethod def __lowercase ( ) -> Optional[Any]: '''simple docstring''' return is_optuna_available() def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> List[str]: '''simple docstring''' return run_hp_search_optuna(lowercase , lowercase , lowercase , **lowercase) def __lowercase ( self , lowercase) -> int: '''simple docstring''' return default_hp_space_optuna(lowercase) class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Any = '''ray''' __A : Optional[int] = '''\'ray[tune]\'''' @staticmethod def __lowercase ( ) -> List[Any]: '''simple docstring''' return is_ray_available() def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> Any: '''simple docstring''' return run_hp_search_ray(lowercase , lowercase , lowercase , 
**lowercase) def __lowercase ( self , lowercase) -> int: '''simple docstring''' return default_hp_space_ray(lowercase) class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Union[str, Any] = '''sigopt''' @staticmethod def __lowercase ( ) -> List[Any]: '''simple docstring''' return is_sigopt_available() def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> Union[str, Any]: '''simple docstring''' return run_hp_search_sigopt(lowercase , lowercase , lowercase , **lowercase) def __lowercase ( self , lowercase) -> List[Any]: '''simple docstring''' return default_hp_space_sigopt(lowercase) class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Optional[Any] = '''wandb''' @staticmethod def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' return is_wandb_available() def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> Union[str, Any]: '''simple docstring''' return run_hp_search_wandb(lowercase , lowercase , lowercase , **lowercase) def __lowercase ( self , lowercase) -> Tuple: '''simple docstring''' return default_hp_space_wandb(lowercase) lowercase : List[str] = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def A_ ( ) -> str: a__ : Union[str, Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(A__ ) > 0: a__ : Any = available_backends[0].name if len(A__ ) > 1: logger.info( F'{len(A__ )} hyperparameter search backends available. Using {name} as the default.' ) return name raise RuntimeError( 'No hyperparameter search backend available.\n' + '\n'.join( F' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
99
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCamelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=UpperCamelCase__ ) if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images['url'] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser('' , add_help=UpperCamelCase__ ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , 
required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ ) return parser.parse_args() if __name__ == "__main__": _UpperCAmelCase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {"vocab_file": "spiece.model"} __magic_name__ = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), } } __magic_name__ = { "google/bigbird-roberta-base": 4096, "google/bigbird-roberta-large": 4096, "google/bigbird-base-trivia-itc": 4096, } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Tuple = VOCAB_FILES_NAMES __lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Union[str, Any] = ['''input_ids''', '''attention_mask'''] __lowercase : List[int] = [] def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): __SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token __SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token __SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token __SCREAMING_SNAKE_CASE = 
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token __SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token __SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token # Mask token behave like a normal word, i.e. include the space before it __SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(lowerCAmelCase__) @property def snake_case_ ( self): return self.sp_model.get_piece_size() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self): __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def snake_case_ ( self , lowerCAmelCase__): return self.sp_model.encode(lowerCAmelCase__ , 
out_type=lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): return self.sp_model.piece_to_id(lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(lowerCAmelCase__) return token def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = """""" __SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase__) + token __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(lowerCAmelCase__) return out_string.strip() def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = True , **lowerCAmelCase__ , ): __SCREAMING_SNAKE_CASE = kwargs.pop("""use_source_tokenizer""" , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__)) __SCREAMING_SNAKE_CASE = [] sub_texts.append(lowerCAmelCase__) else: current_sub_text.append(lowerCAmelCase__) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__)) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __SCREAMING_SNAKE_CASE = re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(lowerCAmelCase__)) else: __SCREAMING_SNAKE_CASE = """""".join(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __SCREAMING_SNAKE_CASE = self.clean_up_tokenization(lowerCAmelCase__) return clean_text else: return text def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): if not os.path.isdir(lowerCAmelCase__): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return __SCREAMING_SNAKE_CASE = os.path.join( lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowerCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(lowerCAmelCase__ , """wb""") as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__) return (out_vocab_file,) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] 
__SCREAMING_SNAKE_CASE = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__)) + [1] return [1] + ([0] * len(lowerCAmelCase__)) + [1] + ([0] * len(lowerCAmelCase__)) + [1] def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
100
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. 
_UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=False , snake_case=2048 , **snake_case , ): snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , model_max_length=snake_case , **snake_case , ) # Creates a mapping for looking up the IDs of special symbols. snake_case_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
snake_case_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case_ = UNICODE_VOCAB_SIZE snake_case_ = len(self._special_codepoints ) @property def a ( self ): return self._unicode_vocab_size def a ( self , snake_case ): return list(snake_case ) def a ( self , snake_case ): try: return ord(snake_case ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def a ( self , snake_case ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def a ( self , snake_case ): return "".join(snake_case ) def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def a ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ = [1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case )) + [1] return result def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def a ( self , snake_case , snake_case = None ): return ()
285
0
from random import shuffle import tensorflow as tf from numpy import array def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' lowercase = int(lowerCAmelCase__ ) assert noofclusters < len(lowerCAmelCase__ ) # Find out the dimensionality lowercase = len(vectors[0] ) # Will help select random centroids from among the available vectors lowercase = list(range(len(lowerCAmelCase__ ) ) ) shuffle(lowerCAmelCase__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. lowercase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION lowercase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points lowercase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowerCAmelCase__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values lowercase = tf.placeholder('''float64''' , [dim] ) lowercase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowerCAmelCase__ , lowerCAmelCase__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) lowercase = [tf.Variable(0 ) for i in range(len(lowerCAmelCase__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value lowercase = tf.placeholder('''int32''' ) lowercase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowerCAmelCase__ , lowerCAmelCase__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input lowercase = tf.placeholder('''float''' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. 
the list of input vectors lowercase = tf.reduce_mean(lowerCAmelCase__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input lowercase = tf.placeholder('''float''' , [dim] ) lowercase = tf.placeholder('''float''' , [dim] ) lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowerCAmelCase__ , lowerCAmelCase__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input lowercase = tf.placeholder('''float''' , [noofclusters] ) lowercase = tf.argmin(lowerCAmelCase__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. lowercase = tf.initialize_all_variables() # Initialize all variables sess.run(lowerCAmelCase__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. lowercase = 100 for _ in range(lowerCAmelCase__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowerCAmelCase__ ) ): lowercase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
lowercase = [ sess.run(lowerCAmelCase__ , feed_dict={va: vect, va: sess.run(lowerCAmelCase__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input lowercase = sess.run( lowerCAmelCase__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowerCAmelCase__ ): # Collect all the vectors assigned to this cluster lowercase = [ vectors[i] for i in range(len(lowerCAmelCase__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location lowercase = sess.run( lowerCAmelCase__ , feed_dict={mean_input: array(lowerCAmelCase__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments lowercase = sess.run(lowerCAmelCase__ ) lowercase = sess.run(lowerCAmelCase__ ) return centroids, assignments
101
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. 
return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
0
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=4 , ): '''simple docstring''' __snake_case : Union[str, Any] = parent __snake_case : Dict = batch_size __snake_case : Optional[int] = seq_length __snake_case : Tuple = is_training __snake_case : Optional[int] = use_attention_mask __snake_case : Dict = use_token_type_ids __snake_case : Dict = use_labels __snake_case : Tuple = vocab_size __snake_case : Tuple = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Any = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : Dict = max_position_embeddings __snake_case : str = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Optional[int] = initializer_range __snake_case : Any = num_choices def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Union[str, Any] = None if self.use_attention_mask: __snake_case : Tuple = random_attention_mask([self.batch_size, 
self.seq_length] ) __snake_case : Optional[Any] = None if self.use_token_type_ids: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Tuple = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = config_and_inputs __snake_case : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = FlaxAlbertModelTester(self ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : Union[str, Any] = model_class_name.from_pretrained('''albert-base-v2''' ) __snake_case : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(a_ ) @require_flax class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) __snake_case : List[str] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __snake_case : Dict = model(a_ , attention_mask=a_ )[0] __snake_case : Dict = (1, 11, 7_68) self.assertEqual(output.shape , a_ ) __snake_case : int = np.array( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
102
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case 
, snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) 
snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0
from ..utils import DummyObject, requires_backends class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : List[str] , **A_ : List[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : str , **A_ : Any): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Union[str, Any] , **A_ : Dict): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : Tuple , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : int , *A_ : Optional[int] , **A_ : int): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : List[str] , **A_ : List[str]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : List[Any] , **A_ : str): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : List[str] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : Optional[int] , **A_ : Any): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : int , *A_ : Dict , **A_ : int): requires_backends(self , ['''sentencepiece''']) class __snake_case 
( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Tuple , *A_ : List[Any] , **A_ : Optional[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : int , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : Optional[int] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Union[str, Any] , **A_ : List[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : Tuple , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[int] , *A_ : Dict , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[Any] , *A_ : Tuple , **A_ : List[str]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Optional[int] , **A_ : Optional[int]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : List[Any] , **A_ : str): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[Any] , *A_ : List[Any] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( 
metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : str , *A_ : Optional[int] , **A_ : List[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[str] , *A_ : List[Any] , **A_ : Optional[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : str , *A_ : Optional[int] , **A_ : Dict): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : int , *A_ : int , **A_ : Optional[int]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : List[Any] , *A_ : List[str] , **A_ : Tuple): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *A_ : Any , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Dict , *A_ : List[str] , **A_ : List[str]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : str , *A_ : List[Any] , **A_ : Optional[Any]): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Tuple , *A_ : Optional[Any] , **A_ : int): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( self : Optional[Any] , *A_ : Optional[Any] , **A_ : Dict): requires_backends(self , ['''sentencepiece''']) class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''sentencepiece'''] def __init__( 
self : Tuple , *A_ : Optional[int] , **A_ : Union[str, Any]): requires_backends(self , ['''sentencepiece'''])
103
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt'''} lowerCAmelCase__ = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } lowerCAmelCase__ = { '''facebook/esm2_t6_8M_UR50D''': 1024, '''facebook/esm2_t12_35M_UR50D''': 1024, } def _A ( A__ ): """simple docstring""" with open(A__ , '''r''' ) as f: __lowercase = f.read().splitlines() return [l.strip() for l in lines] class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = ['input_ids', 'attention_mask'] def __init__( self : Dict ,lowercase__ : Optional[int] ,lowercase__ : str="<unk>" ,lowercase__ : Dict="<cls>" ,lowercase__ : List[Any]="<pad>" ,lowercase__ : Optional[int]="<mask>" ,lowercase__ : str="<eos>" ,**lowercase__ : Union[str, Any] ,): super().__init__(**lowercase__ ) __lowercase = load_vocab_file(lowercase__ ) __lowercase = dict(enumerate(self.all_tokens ) ) __lowercase = {tok: ind for ind, tok in enumerate(self.all_tokens )} __lowercase = unk_token __lowercase = cls_token __lowercase = pad_token __lowercase = mask_token __lowercase = eos_token __lowercase = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): return self._id_to_token.get(lowercase__ ,self.unk_token ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : str ): return 
self._token_to_id.get(lowercase__ ,self._token_to_id.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[int] ,**lowercase__ : List[str] ): return text.split() def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any]=False ): return len(self._id_to_token ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): return {token: i for i, token in enumerate(self.all_tokens )} def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ): return self._token_to_id.get(lowercase__ ,self._token_to_id.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ): return self._id_to_token.get(lowercase__ ,self.unk_token ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ): __lowercase = [self.cls_token_id] __lowercase = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List ,lowercase__ : Optional[List] = None ,lowercase__ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] __lowercase = [1] + ([0] * len(lowercase__ )) + [1] if token_ids_a is not None: mask += [0] * len(lowercase__ ) + [1] return mask def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Optional[int] ,lowercase__ : Dict ): __lowercase = os.path.join(lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with 
open(lowercase__ ,'''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def SCREAMING_SNAKE_CASE ( self : Tuple ): return self.get_vocab_size(with_added_tokens=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Union[List[str], List[AddedToken]] ,lowercase__ : bool = False ): return super()._add_tokens(lowercase__ ,special_tokens=lowercase__ )
104
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) 
snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , 
snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if 
output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output 
return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
0
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> str: a : Dict = 0 def __a ( self ) -> List[str]: a : List[str] = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdirname: a : List[str] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : Optional[Any] = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) a : Dict = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: a : Union[str, Any] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : str = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) a : Optional[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Any: with 
tempfile.TemporaryDirectory() as tmpdirname: a : Optional[Any] = CLIPConfig() # Create a dummy config file with image_proceesor_type a : List[str] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : Optional[Any] = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally a : List[str] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ).to_dict() config_dict.pop("image_processor_type" ) a : Optional[int] = CLIPImageProcessor(**lowerCAmelCase__ ) # save in new folder model_config.save_pretrained(lowerCAmelCase__ ) config.save_pretrained(lowerCAmelCase__ ) a : str = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) # make sure private variable is not incorrectly saved a : List[Any] = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdirname: a : Dict = Path(lowerCAmelCase__ ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) a : List[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Any: with self.assertRaisesRegex( lowerCAmelCase__ , "clip-base is not a local folder and is not a valid model identifier" ): a : Union[str, Any] = AutoImageProcessor.from_pretrained("clip-base" ) def __a ( self ) -> Union[str, Any]: with self.assertRaisesRegex( lowerCAmelCase__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): a : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ 
, revision="aaaaaa" ) def __a ( self ) -> Union[str, Any]: with self.assertRaisesRegex( lowerCAmelCase__ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): a : Optional[int] = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def __a ( self ) -> Any: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCAmelCase__ ): a : Union[str, Any] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCAmelCase__ ): a : Tuple = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) a : Tuple = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(lowerCAmelCase__ ) a : Dict = AutoImageProcessor.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def __a ( self ) -> int: try: AutoConfig.register("custom" , lowerCAmelCase__ ) AutoImageProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCAmelCase__ ): AutoImageProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: a : List[Any] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : Any = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) a : Optional[int] = CustomImageProcessor.from_pretrained(lowerCAmelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(lowerCAmelCase__ ) a : List[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __a ( self ) -> int: class __UpperCamelCase ( a__ ): lowerCamelCase : Tuple =True try: AutoConfig.register("custom" , lowerCAmelCase__ ) AutoImageProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ ) # If remote code is not set, the default is to use local a : Tuple = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) 
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. a : Optional[int] = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub a : Optional[Any] = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(lowerCAmelCase__ , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
105
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Dict ): lowerCAmelCase__ : Tuple = tempfile.mkdtemp() lowerCAmelCase__ : List[Any] = BlipImageProcessor() lowerCAmelCase__ : Dict = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) lowerCAmelCase__ : Optional[Any] = BlipaProcessor(lowercase_ ,lowercase_ ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Dict ,**lowercase_ : Dict ): return AutoProcessor.from_pretrained(self.tmpdirname ,**lowercase_ ).tokenizer def __lowerCAmelCase ( self : Union[str, Any] ,**lowercase_ : Tuple ): return AutoProcessor.from_pretrained(self.tmpdirname ,**lowercase_ ).image_processor def __lowerCAmelCase ( self : List[Any] ): shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : Union[str, Any] = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta )] lowerCAmelCase__ : Any = [Image.fromarray(np.moveaxis(lowercase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : str = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' ) lowerCAmelCase__ : Optional[int] = self.get_image_processor(do_normalize=lowercase_ ,padding_value=1.0 ) lowerCAmelCase__ : List[str] = BlipaProcessor.from_pretrained( self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' 
,do_normalize=lowercase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,lowercase_ ) def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : Tuple = self.get_image_processor() lowerCAmelCase__ : List[Any] = self.get_tokenizer() lowerCAmelCase__ : Tuple = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ ) lowerCAmelCase__ : int = self.prepare_image_inputs() lowerCAmelCase__ : Dict = image_processor(lowercase_ ,return_tensors='''np''' ) lowerCAmelCase__ : Dict = processor(images=lowercase_ ,return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : Tuple = self.get_image_processor() lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer() lowerCAmelCase__ : Optional[Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ ) lowerCAmelCase__ : Any = '''lower newer''' lowerCAmelCase__ : Optional[Any] = processor(text=lowercase_ ) lowerCAmelCase__ : Dict = tokenizer(lowercase_ ,return_token_type_ids=lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def __lowerCAmelCase ( self : Optional[int] ): lowerCAmelCase__ : int = self.get_image_processor() lowerCAmelCase__ : Optional[Any] = self.get_tokenizer() lowerCAmelCase__ : Optional[Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ ) lowerCAmelCase__ : Any = '''lower newer''' lowerCAmelCase__ : Optional[int] = self.prepare_image_inputs() lowerCAmelCase__ : List[Any] = processor(text=lowercase_ ,images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', 
'''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : Tuple = self.get_image_processor() lowerCAmelCase__ : Tuple = self.get_tokenizer() lowerCAmelCase__ : List[Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ ) lowerCAmelCase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ : Dict = processor.batch_decode(lowercase_ ) lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ ,lowercase_ ) def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : Union[str, Any] = self.get_image_processor() lowerCAmelCase__ : Dict = self.get_tokenizer() lowerCAmelCase__ : Union[str, Any] = BlipaProcessor(tokenizer=lowercase_ ,image_processor=lowercase_ ) lowerCAmelCase__ : List[Any] = '''lower newer''' lowerCAmelCase__ : Tuple = self.prepare_image_inputs() lowerCAmelCase__ : List[Any] = processor(text=lowercase_ ,images=lowercase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
106
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = {} snake_case_ = tokenizer(example['content'] , truncation=UpperCamelCase__ )['input_ids'] snake_case_ = len(example['content'] ) / len(output['input_ids'] ) return output _UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) _UpperCAmelCase : List[Any] = parser.parse_args() if args.num_workers is None: _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count() _UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir) _UpperCAmelCase : Optional[int] = time.time() _UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Dict = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
0
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : bool = field(default=_UpperCamelCase , metadata={"""help""": """Whether to use SortishSampler or not."""} ) SCREAMING_SNAKE_CASE_ : bool = field( default=_UpperCamelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=_UpperCamelCase , metadata={ """help""": ( """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `max_length` value of the model configuration.""" ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=_UpperCamelCase , metadata={ """help""": ( """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `num_beams` value of the model configuration.""" ) } , ) SCREAMING_SNAKE_CASE_ : Optional[Union[str, Path, GenerationConfig]] = field( default=_UpperCamelCase , metadata={ """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.""" } , ) def __UpperCAmelCase ( self : int ) -> Dict: a = super().to_dict() for k, v in d.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): a = v.to_dict() return d
107
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" from __future__ import annotations import math class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , snake_case__ ): """simple docstring""" lowerCAmelCase : Any = size # approximate the overall size of segment tree with given value lowerCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update lowerCAmelCase : List[str] = [0 for i in range(0 , 4 * size )] lowerCAmelCase : Dict = [0 for i in range(0 , 4 * size )] # flag for lazy update def lowercase__ ( self , snake_case__ ): """simple docstring""" return idx * 2 def lowercase__ ( self , snake_case__ ): """simple docstring""" return idx * 2 + 1 def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): """simple docstring""" if left_element == right_element: lowerCAmelCase : List[str] = a[left_element - 1] else: lowerCAmelCase : Tuple = (left_element + right_element) // 2 self.build(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ ) self.build(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ ) lowerCAmelCase : Tuple = max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): """simple docstring""" if self.flag[idx] is True: lowerCAmelCase : Optional[int] = self.lazy[idx] lowerCAmelCase : List[str] = False if left_element != right_element: lowerCAmelCase : Optional[Any] = self.lazy[idx] lowerCAmelCase : List[Any] = self.lazy[idx] lowerCAmelCase : List[Any] = True lowerCAmelCase : Optional[Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: lowerCAmelCase : str = val if left_element != right_element: lowerCAmelCase : Optional[Any] = val lowerCAmelCase : Union[str, Any] = val lowerCAmelCase : int = True lowerCAmelCase : int = True return True lowerCAmelCase : 
List[str] = (left_element + right_element) // 2 self.update(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.update(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Optional[int] = max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] ) return True def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): """simple docstring""" if self.flag[idx] is True: lowerCAmelCase : List[Any] = self.lazy[idx] lowerCAmelCase : str = False if left_element != right_element: lowerCAmelCase : Tuple = self.lazy[idx] lowerCAmelCase : List[Any] = self.lazy[idx] lowerCAmelCase : Optional[int] = True lowerCAmelCase : str = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] lowerCAmelCase : Any = (left_element + right_element) // 2 lowerCAmelCase : Optional[int] = self.query(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Dict = self.query(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ ) return max(snake_case__ , snake_case__ ) def __str__( self ): """simple docstring""" return str([self.query(1 , 1 , self.size , snake_case__ , snake_case__ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": lowerCAmelCase__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] lowerCAmelCase__ = 15 lowerCAmelCase__ = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
108
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): 
snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) 
self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = 
ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
0
"""simple docstring""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A: Tuple = logging.get_logger(__name__) A: List[Any] = { "nvidia/segformer-b0-finetuned-ade-512-512": ( "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : int = 'segformer' def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , _SCREAMING_SNAKE_CASE=[32, 64, 160, 256] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , _SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1E-6 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=255 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. 
This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , _SCREAMING_SNAKE_CASE , ) UpperCAmelCase : Dict = num_channels UpperCAmelCase : Optional[Any] = num_encoder_blocks UpperCAmelCase : Optional[Any] = depths UpperCAmelCase : Dict = sr_ratios UpperCAmelCase : Any = hidden_sizes UpperCAmelCase : Any = patch_sizes UpperCAmelCase : Union[str, Any] = strides UpperCAmelCase : Optional[Any] = mlp_ratios UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : Dict = attention_probs_dropout_prob UpperCAmelCase : Optional[Any] = classifier_dropout_prob UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : List[str] = drop_path_rate UpperCAmelCase : int = layer_norm_eps UpperCAmelCase : Optional[Any] = decoder_hidden_size UpperCAmelCase : int = kwargs.get("""reshape_last_stage""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : List[str] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def SCREAMING_SNAKE_CASE ( self ) -> float: '''simple docstring''' return 1E-4 @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return 12
109
from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase ( lowercase_ ): @staticmethod @abstractmethod def a ( snake_case ): raise NotImplementedError() @abstractmethod def a ( self ): raise NotImplementedError()
285
0
"""Convert BLIP-2 checkpoints from the original LAVIS repository to the HF format.

NOTE(review): recovered from a machine-mangled dump in which every function
shared one name and every parameter list repeated a single identifier
(a SyntaxError).  Function, parameter and local names below are reconstructed
from the call sites and right-hand-side references that survived the
mangling; numeric constants and all runtime strings are unchanged.
"""

import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipaConfig,
    BlipaForConditionalGeneration,
    BlipaProcessor,
    BlipaVisionConfig,
    BlipImageProcessor,
    OPTConfig,
    TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    """Download and return the demo image as an RGB PIL image."""
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    """Build (old_key, new_key) pairs mapping LAVIS state-dict keys to HF ones."""
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    """Fuse the separately stored q/v biases into the qkv bias HF expects.

    The k bias is zero in the original model, hence the zeros in the middle.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blipa_config(model_name, eos_token_id):
    """Return the (config, image_size) pair matching ``model_name``."""
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size


@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the HF architecture,
    verify logits/generation match, then optionally save and push to the hub."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        # NOTE(review): this branch only computes the expected slice — the
        # corresponding allclose assert was absent from the source dump.
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
144
"""Convert timm ResNet checkpoints to the HF ResNet architecture.

NOTE(review): recovered from a machine-mangled dump: both dataclasses shared
one name, all methods shared one name, parameter lists repeated one
identifier (a SyntaxError), ``field(default_factory=...)`` pointed at an
undefined name instead of ``list``, and ``nn.Convad``/``nn.BatchNormad`` are
nonexistent torch names (restored to ``nn.Conv2d``/``nn.BatchNorm2d``).
Identifiers below are reconstructed from the references that survived.
"""

import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Record the leaf (parameterized) modules a model executes on one input."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Keep only leaf modules (no submodules) plus convs/batch-norms.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copy weights from ``src`` to ``dest`` by matching execution order."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Trace both models on ``x`` and transfer state dicts pairwise.

        Raises if the two traces contain a different number of operations.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Port one timm checkpoint into ``ResNetForImageClassification`` and
    optionally push model + image processor to the hub."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet (or all of them when ``model_name`` is None)."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
"""Tests for the Pix2Struct image processor.

NOTE(review): recovered from a machine-mangled dump in which all three
classes were named ``a__`` and every test method ``UpperCamelCase``, so the
later definitions silently shadowed the earlier ones and most tests never
ran.  Conventional unittest names are restored; bodies, constants and
runtime strings are unchanged.
"""

import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor


class PixaStructImageProcessingTester(unittest.TestCase):
    """Shared fixture: holds the processor kwargs and dummy-input settings."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode without header text must raise.
            # NOTE(review): the raised type was mangled away in the dump;
            # ValueError matches the processor's documented behaviour.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the 4th (alpha) channel is dropped, hence -1.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
68
"""Benchmark iterating over a `datasets.Dataset` in several formats.

NOTE(review): recovered from a machine-mangled dump in which the four module
constants all shared the name ``_UpperCAmelCase`` while later code referenced
``SPEED_TEST_N_EXAMPLES``/``RESULTS_BASEPATH``/... (a NameError), and every
benchmark function shared one name with duplicated parameters (a SyntaxError).
Names are restored from the references that survived inside the bodies.
"""

import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    """Row-by-row ``__getitem__`` over the first ``length`` rows."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """Sliced ``__getitem__`` over the whole dataset in ``batch_size`` chunks."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    """Row reads with the dataset temporarily formatted as ``type``.

    The parameter deliberately shadows the builtin: it must match the
    ``"type"`` key used in the kwargs dicts below.
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    """Batched reads with the dataset temporarily formatted as ``type``."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time every access pattern before and
    after shuffling, and dump the timings to ``RESULTS_FILE_PATH`` as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
285
0
class _snake_case : def __init__( self , _a , _a , _a ): __magic_name__ : Dict = name __magic_name__ : Tuple = value __magic_name__ : List[str] = weight def __repr__( self ): return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def SCREAMING_SNAKE_CASE ( self ): return self.value def SCREAMING_SNAKE_CASE ( self ): return self.name def SCREAMING_SNAKE_CASE ( self ): return self.weight def SCREAMING_SNAKE_CASE ( self ): return self.value / self.weight def lowerCAmelCase_ ( _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' __magic_name__ : Optional[int] = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Optional[Any] ) -> str: '''simple docstring''' __magic_name__ : Union[str, Any] = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) __magic_name__ : List[Any] = [] __magic_name__ , __magic_name__ : Optional[Any] = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
281
def mf_knapsack(i, wt, val, j):
    """Memoized 0/1 knapsack: best value using the first i items with capacity j.

    Relies on a module-level memo table `f` pre-filled with -1 (unknown).
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best  # memoize (the original dropped this write, so -1 leaked out)
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Returns (optimal_value, dp_table) where dp_table[i][c] is the best value
    using the first i items at capacity c.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    # Index with w (the capacity), not the loop variable, so w == 0 also works.
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal item subset.

    Raises ValueError/TypeError on malformed weight/value vectors.
    Returns (optimal_value, set_of_1_based_item_indices).
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, adding each taken item's 1-based index."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # Item i was not taken.
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
285
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""", # See all Dinat models at https://huggingface.co/models?filter=dinat } class UpperCAmelCase_ ( lowercase_ , lowercase_): snake_case__ = '''dinat''' snake_case__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Dict , __UpperCamelCase : List[str]=4 , __UpperCamelCase : int=3 , __UpperCamelCase : str=64 , __UpperCamelCase : Tuple=[3, 4, 6, 5] , __UpperCamelCase : int=[2, 4, 8, 16] , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , __UpperCamelCase : Any=3.0 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : Union[str, Any]=1E-5 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : str=None , **__UpperCamelCase : List[Any] , ) -> List[Any]: super().__init__(**__UpperCamelCase ) _UpperCamelCase = patch_size _UpperCamelCase = num_channels _UpperCamelCase = embed_dim _UpperCamelCase = depths _UpperCamelCase = len(__UpperCamelCase ) _UpperCamelCase = num_heads _UpperCamelCase = kernel_size _UpperCamelCase = dilations _UpperCamelCase = mlp_ratio _UpperCamelCase = qkv_bias _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = drop_path_rate _UpperCamelCase = hidden_act _UpperCamelCase = layer_norm_eps _UpperCamelCase = initializer_range # we set the hidden_size attribute in order to make Dinat work 
with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCamelCase = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) ) _UpperCamelCase = layer_scale_init_value _UpperCamelCase = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__UpperCamelCase ) + 1 )] _UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices( out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
256
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class lowercase(SequenceFeatureExtractor):
    """CLAP-style feature extractor: turns raw mono audio into (fused) log-mel spectrograms."""

    # Tensor names produced by __call__; consumed by the base class.
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=4_8000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 1_4000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of frequency bins of an rFFT with this window size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two mel banks: HTK-style (used for "fusion") and Slaney-style (non-fused path).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self):
        """Serialize to a dict, dropping the large (recomputable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        """Compute a (frames, n_mels) dB log-mel spectrogram of `waveform`."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel "fusion" mel: a shrunk global view + 3 random chunks."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        """Truncate/pad one waveform and return (mel_features, is_longer_than_max)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation=None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
285
0
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer


class lowercase__(ProcessorMixin):
    """Wav2Vec2 processor wrapping a feature extractor (audio) and a tokenizer (text)."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # By default route calls to the feature extractor; as_target_processor switches this.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            # Legacy configs without `tokenizer_class`: fall back to loading the parts directly.
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Featurize `audio` and/or tokenize `text`; labels are attached when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` and/or `labels`; labels are attached when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route __call__/pad to the tokenizer for label processing (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
200
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of `node_proto` named `name` to `new_name`, recursing into subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert+pop keeps the position of the replaced input stable.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and repoint their consumers to the kept copy.

    `ind_to_replace` is a list of (dup_index, kept_index) pairs with dup_index > kept_index.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers of an ONNX model and save an optimized copy.

    Returns the path of the saved, optimized model.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # byte sizes by ONNX data type: 1=float32, 6=int32, 7=int64, 11=double
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
285
0
'''simple docstring''' def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: if n_term == "": return [] UpperCAmelCase : str = [] for temp in range(int(UpperCamelCase__ ) ): series.append(F'''1/{temp + 1}''' if series else """1""" ) return series if __name__ == "__main__": a : Optional[Any] = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
265
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
0
import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor A : int = logging.get_logger(__name__) class _lowercase ( lowercase_): """simple docstring""" def __init__( self : Dict , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[Any] ): '''simple docstring''' warnings.warn( "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use LayoutLMv2ImageProcessor instead." , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
184
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
'''simple docstring''' import math from datetime import datetime, timedelta def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : Dict = year % 19 _UpperCAmelCase : int = year % 4 _UpperCAmelCase : Optional[Any] = year % 7 _UpperCAmelCase : Union[str, Any] = math.floor(year / 100 ) _UpperCAmelCase : Optional[Any] = math.floor((13 + 8 * leap_day_inhibits) / 25 ) _UpperCAmelCase : Any = leap_day_inhibits / 4 _UpperCAmelCase : List[str] = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 _UpperCAmelCase : Any = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 _UpperCAmelCase : Union[str, Any] = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon _UpperCAmelCase : str = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(UpperCamelCase__ , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(UpperCamelCase__ , 4 , 18 ) else: return datetime(UpperCamelCase__ , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1_994, 2_000, 2_010, 2_021, 2_023): lowerCamelCase__ = """will be""" if year > datetime.now().year else """was""" print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
234
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = """▁""" lowercase_ = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""} lowercase_ = { """sentencepiece_model_file""": """sentencepiece.bpe.model""", """vocab_file""": """vocab.txt""", } lowercase_ = { """vocab_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", }, """sentencepiece_model_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", }, } lowercase_ = { """ernie-m-base""": 5_1_4, """ernie-m-large""": 5_1_4, } lowercase_ = { """ernie-m-base""": {"""do_lower_case""": False}, """ernie-m-large""": {"""do_lower_case""": False}, } class __lowerCAmelCase ( lowercase_ ): _a = ["input_ids"] _a = VOCAB_FILES_NAMES _a = PRETRAINED_INIT_CONFIGURATION _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = PRETRAINED_VOCAB_FILES_MAP _a = RESOURCE_FILES_NAMES def __init__( self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=False , lowerCAmelCase="utf8" , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase = None , **lowerCAmelCase , ) -> int: '''simple docstring''' _lowercase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , vocab_file=lowerCAmelCase , 
encoding=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) _lowercase =do_lower_case _lowercase =sentencepiece_model_ckpt _lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: _lowercase =self.load_vocab(filepath=lowerCAmelCase ) else: _lowercase ={self.sp_model.id_to_piece(lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )} _lowercase ={v: k for k, v in self.vocab.items()} def A__ ( self , lowerCAmelCase ) -> int: '''simple docstring''' if text is None: return None _lowercase =self.tokenize(lowerCAmelCase ) _lowercase , _lowercase ='', [] for i, ch in enumerate(lowerCAmelCase ): if ch in self.SP_CHAR_MAPPING: _lowercase =self.SP_CHAR_MAPPING.get(lowerCAmelCase ) else: _lowercase =unicodedata.normalize('NFKC' , lowerCAmelCase ) if self.is_whitespace(lowerCAmelCase ): continue normalized_text += ch char_mapping.extend([i] * len(lowerCAmelCase ) ) _lowercase , _lowercase , _lowercase =normalized_text, [], 0 if self.do_lower_case: _lowercase =text.lower() for token in split_tokens: if token[:1] == "▁": _lowercase =token[1:] _lowercase =text[offset:].index(lowerCAmelCase ) + offset _lowercase =start + len(lowerCAmelCase ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) _lowercase =end return token_mapping @property def A__ ( self ) -> Optional[int]: '''simple docstring''' return len(self.vocab ) def A__ ( self ) -> Optional[Any]: '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ) -> str: '''simple docstring''' _lowercase =self.__dict__.copy() _lowercase =None return state def __setstate__( self , lowerCAmelCase ) -> int: '''simple docstring''' _lowercase =d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowercase ={} _lowercase 
=spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def A__ ( self , lowerCAmelCase ) -> int: '''simple docstring''' return "".join((self.SP_CHAR_MAPPING.get(lowerCAmelCase , lowerCAmelCase ) for c in text) ) def A__ ( self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=64 , lowerCAmelCase=0.1 ) -> Optional[int]: '''simple docstring''' if self.sp_model_kwargs.get('enable_sampling' ) is True: _lowercase =True if self.sp_model_kwargs.get('alpha' ) is not None: _lowercase =self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: _lowercase =self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: _lowercase =self.sp_model.EncodeAsPieces(lowerCAmelCase ) else: _lowercase =self.sp_model.SampleEncodeAsPieces(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) _lowercase =[] for pi, piece in enumerate(lowerCAmelCase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(lowerCAmelCase ) and pi != 0: new_pieces.append(lowerCAmelCase ) continue else: continue _lowercase =0 for i, chunk in enumerate(lowerCAmelCase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(lowerCAmelCase ) or self.is_punct(lowerCAmelCase ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(lowerCAmelCase ) _lowercase =i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _lowercase =i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _lowercase =i if len(lowerCAmelCase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def A__ ( self , lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' _lowercase =''.join(lowerCAmelCase ).replace(lowerCAmelCase , ' ' ).strip() return out_string def A__ ( self , 
lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' _lowercase =self.convert_ids_to_tokens(lowerCAmelCase ) _lowercase =''.join(lowerCAmelCase ).replace(lowerCAmelCase , ' ' ).strip() return out_string def A__ ( self , lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token ) ) def A__ ( self , lowerCAmelCase ) -> Tuple: '''simple docstring''' return self.reverse_vocab.get(lowerCAmelCase , self.unk_token ) def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ) -> List[Any]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowercase =[self.cls_token_id] _lowercase =[self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ) -> List[str]: '''simple docstring''' if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def A__ ( self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=False ) -> Union[str, Any]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1] return [1] + ([0] * len(lowerCAmelCase )) + [1] def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Any: '''simple docstring''' if token_ids_a is None: # [CLS] X [SEP] return (len(lowerCAmelCase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(lowerCAmelCase ) + 1) + [1] * (len(lowerCAmelCase ) + 3) def A__ ( self , lowerCAmelCase ) -> int: '''simple docstring''' if "\u4e00" <= char <= "\u9fff": return True return False def A__ ( self , lowerCAmelCase ) -> Dict: '''simple docstring''' if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def A__ ( self , lowerCAmelCase ) -> int: '''simple docstring''' if char in ",;:.?!~,;:。?!《》【】": return True return False def A__ ( self , lowerCAmelCase ) -> Dict: '''simple docstring''' if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(lowerCAmelCase ) == 1: _lowercase =unicodedata.category(lowerCAmelCase ) if cat == "Zs": return True return False def A__ ( self , lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' _lowercase ={} with io.open(lowerCAmelCase , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(lowerCAmelCase ): _lowercase =line.rstrip('\n' ) _lowercase =int(lowerCAmelCase ) return token_to_idx def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Tuple: '''simple docstring''' _lowercase =0 if os.path.isdir(lowerCAmelCase ): _lowercase =os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: _lowercase =(filename_prefix + '-' if filename_prefix else '') + save_directory with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda lowerCAmelCase : kv[1] ): if index != token_index: 
logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' ' Please check that the vocabulary is not corrupted!' ) _lowercase =token_index writer.write(token + '\n' ) index += 1 _lowercase =os.path.join(lowerCAmelCase , 'sentencepiece.bpe.model' ) with open(lowerCAmelCase , 'wb' ) as fi: _lowercase =self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (vocab_file,)
205
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 
0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--txt2img_unclip''', default='''kakaobrain/karlo-v1-alpha''', type=str, required=False, help='''The pretrained txt2img unclip.''', ) a_ = parser.parse_args() a_ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a_ = CLIPImageProcessor() a_ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''') a_ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
340
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , 
splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 
'README.md' ) )
285
0
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) _snake_case : Union[str, Any] = logging.getLogger(__name__) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): __snake_case : str = np.argmax(UpperCamelCase__ , axis=1 ) return np.sum(outputs == labels ) def lowerCAmelCase_ ( __lowerCamelCase ): with open(UpperCamelCase__ , encoding="utf_8" ) as f: __snake_case : List[str] = csv.reader(UpperCamelCase__ ) __snake_case : Dict = [] next(UpperCamelCase__ ) # skip the first line for line in tqdm(UpperCamelCase__ ): output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case : List[Any] = [] for dataset in encoded_datasets: __snake_case : int = len(UpperCamelCase__ ) __snake_case : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) __snake_case : Any = np.zeros((n_batch, 2) , dtype=np.intaa ) __snake_case : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa ) __snake_case : str = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(UpperCamelCase__ ): __snake_case : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __snake_case : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __snake_case : List[str] = with_conta __snake_case : List[Any] = with_conta __snake_case : 
Optional[int] = len(UpperCamelCase__ ) - 1 __snake_case : List[str] = len(UpperCamelCase__ ) - 1 __snake_case : Optional[int] = with_conta __snake_case : Optional[int] = with_conta __snake_case : Optional[Any] = mc_label __snake_case : List[str] = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(UpperCamelCase__ ) for t in all_inputs ) ) return tensor_datasets def lowerCAmelCase_ ( ): __snake_case : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--model_name" , type=UpperCamelCase__ , default="openai-gpt" , help="pretrained model name" ) parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." ) parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." ) parser.add_argument( "--output_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The output directory where the model predictions and checkpoints will be written." , ) parser.add_argument("--train_dataset" , type=UpperCamelCase__ , default="" ) parser.add_argument("--eval_dataset" , type=UpperCamelCase__ , default="" ) parser.add_argument("--seed" , type=UpperCamelCase__ , default=4_2 ) parser.add_argument("--num_train_epochs" , type=UpperCamelCase__ , default=3 ) parser.add_argument("--train_batch_size" , type=UpperCamelCase__ , default=8 ) parser.add_argument("--eval_batch_size" , type=UpperCamelCase__ , default=1_6 ) parser.add_argument("--adam_epsilon" , default=1e-8 , type=UpperCamelCase__ , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , type=UpperCamelCase__ , default=1 ) parser.add_argument( "--max_steps" , default=-1 , type=UpperCamelCase__ , help=( "If > 0: set total number of training steps to perform. Override num_train_epochs." 
) , ) parser.add_argument( "--gradient_accumulation_steps" , type=UpperCamelCase__ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , ) parser.add_argument("--learning_rate" , type=UpperCamelCase__ , default=6.25e-5 ) parser.add_argument("--warmup_steps" , default=0 , type=UpperCamelCase__ , help="Linear warmup over warmup_steps." ) parser.add_argument("--lr_schedule" , type=UpperCamelCase__ , default="warmup_linear" ) parser.add_argument("--weight_decay" , type=UpperCamelCase__ , default=0.0_1 ) parser.add_argument("--lm_coef" , type=UpperCamelCase__ , default=0.9 ) parser.add_argument("--n_valid" , type=UpperCamelCase__ , default=3_7_4 ) parser.add_argument("--server_ip" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." ) __snake_case : Tuple = parser.parse_args() print(UpperCamelCase__ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __snake_case : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) __snake_case : int = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(UpperCamelCase__ , UpperCamelCase__ ) ) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True." 
) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __snake_case : Optional[Any] = ["_start_", "_delimiter_", "_classify_"] __snake_case : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(UpperCamelCase__ ) __snake_case : List[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) __snake_case : List[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(UpperCamelCase__ ) ) model.to(UpperCamelCase__ ) # Load and encode the datasets def tokenize_and_encode(__lowerCamelCase ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCamelCase__ ) ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): return obj return [tokenize_and_encode(UpperCamelCase__ ) for o in obj] logger.info("Encoding dataset..." 
) __snake_case : Dict = load_rocstories_dataset(args.train_dataset ) __snake_case : Optional[Any] = load_rocstories_dataset(args.eval_dataset ) __snake_case : Dict = (train_dataset, eval_dataset) __snake_case : str = tokenize_and_encode(UpperCamelCase__ ) # Compute the max input length for the Transformer __snake_case : Dict = model.config.n_positions // 2 - 2 __snake_case : Optional[int] = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __snake_case : Any = min(UpperCamelCase__ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __snake_case : List[str] = pre_process_datasets(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) __snake_case , __snake_case : Tuple = tensor_datasets[0], tensor_datasets[1] __snake_case : str = TensorDataset(*UpperCamelCase__ ) __snake_case : Dict = RandomSampler(UpperCamelCase__ ) __snake_case : Optional[Any] = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.train_batch_size ) __snake_case : int = TensorDataset(*UpperCamelCase__ ) __snake_case : List[str] = SequentialSampler(UpperCamelCase__ ) __snake_case : str = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __snake_case : Dict = args.max_steps __snake_case : Optional[int] = args.max_steps // (len(UpperCamelCase__ ) // args.gradient_accumulation_steps) + 1 else: __snake_case : Dict = len(UpperCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs __snake_case : List[str] = list(model.named_parameters() ) __snake_case : Optional[Any] = ["bias", "LayerNorm.bias", "LayerNorm.weight"] __snake_case : List[Any] = [ { "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], "weight_decay": args.weight_decay, }, 
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0}, ] __snake_case : Union[str, Any] = AdamW(UpperCamelCase__ , lr=args.learning_rate , eps=args.adam_epsilon ) __snake_case : List[str] = get_linear_schedule_with_warmup( UpperCamelCase__ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCamelCase__ ) if args.do_train: __snake_case , __snake_case , __snake_case : Tuple = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ): __snake_case : Any = 0 __snake_case : List[str] = 0 __snake_case : Dict = tqdm(UpperCamelCase__ , desc="Training" ) for step, batch in enumerate(UpperCamelCase__ ): __snake_case : Any = tuple(t.to(UpperCamelCase__ ) for t in batch ) __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = batch __snake_case : Any = model(UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ ) __snake_case : int = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __snake_case : Dict = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __snake_case : Any = "Training loss: {:.2e} lr: {:.2e}".format(UpperCamelCase__ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __snake_case : Tuple = model.module if hasattr(UpperCamelCase__ , "module" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __snake_case : List[Any] = os.path.join(args.output_dir , UpperCamelCase__ ) __snake_case : str = os.path.join(args.output_dir , UpperCamelCase__ ) torch.save(model_to_save.state_dict() , UpperCamelCase__ ) model_to_save.config.to_json_file(UpperCamelCase__ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that 
you have fine-tuned __snake_case : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __snake_case : Tuple = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(UpperCamelCase__ ) if args.do_eval: model.eval() __snake_case , __snake_case : List[str] = 0, 0 __snake_case , __snake_case : Any = 0, 0 for batch in tqdm(UpperCamelCase__ , desc="Evaluating" ): __snake_case : str = tuple(t.to(UpperCamelCase__ ) for t in batch ) __snake_case , __snake_case , __snake_case , __snake_case : str = batch with torch.no_grad(): __snake_case , __snake_case , __snake_case , __snake_case : Dict = model( UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ ) __snake_case : Dict = mc_logits.detach().cpu().numpy() __snake_case : str = mc_labels.to("cpu" ).numpy() __snake_case : Optional[int] = accuracy(UpperCamelCase__ , UpperCamelCase__ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __snake_case : Optional[int] = eval_loss / nb_eval_steps __snake_case : Optional[Any] = eval_accuracy / nb_eval_examples __snake_case : Any = tr_loss / nb_tr_steps if args.do_train else None __snake_case : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss} __snake_case : str = os.path.join(args.output_dir , "eval_results.txt" ) with open(UpperCamelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key in sorted(result.keys() ): logger.info(" %s = %s" , UpperCamelCase__ , str(result[key] ) ) writer.write("%s = %s\n" % (key, str(result[key] )) ) if __name__ == "__main__": main()
123
import unittest

from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BLOOM.

    BLOOM only ships a fast (Rust-backed) tokenizer, so the slow class is None
    and the slow-tokenizer tests are disabled.
    """

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        """Download the reference tokenizer once and save it to the test tmp dir."""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        """Reload the tokenizer saved in ``setUp`` with the test's special-token map."""
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Round-trip a pair of known sentences through encode/decode and check exact ids."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        """Encoding with max_length must work, but padding without a pad token must raise."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: truncation-only calls must not raise.
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """Encode/decode one multilingual XNLI sample and require lossless round-trip."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
285
0
"""Convert a fairseq BART checkpoint to the Hugging Face format."""

import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

# (fairseq key, Hugging Face key) pairs for the MNLI classification head.
mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no Hugging Face counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Load a local ``model.pt`` into the fairseq ``bart.large.cnn`` hub interface."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    """Build an untied linear LM head whose weights alias the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint into the Hugging Face structure and
    save it under ``pytorch_dump_folder_path``. Raises ``ValueError`` when the
    tokenizers or model outputs disagree.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)

    # Sanity check: both tokenizers must agree on the sample text.
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
144
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ``num_class_images`` regularization images matching ``class_prompt``
    from the LAION-400M kNN service into ``class_data_dir``.

    Writes images to ``{class_data_dir}/images`` and records captions, URLs and
    local paths to caption.txt / urls.txt / images.txt.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Already have enough images on disk — nothing to do.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until enough candidates are returned (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Verify the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip broken URLs / undecodable payloads.
                continue
    return


def parse_args():
    """Parse the command-line arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    """Reduce the Natural-Questions annotation of ``example`` to a single answer dict
    with keys id/category/start_token/end_token/start_byte/end_byte/text/remove_it."""

    def choose_first(answer, is_long_answer=False):
        # Pick the first annotation that actually has a span; long answers are
        # wrapped so every value is a one-element list.
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and re-index the answer span accordingly.

    Returns ``{"context": str, "answer": {...}}``; ``assertion=True`` prints a
    diff whenever the re-indexed span text disagrees with the original tokens.
    """
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # every dropped HTML token before the span shifts it one to the left
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question+context and split over-long contexts into overlapping
    strides, re-mapping the answer span into each stride (or -100 when absent)."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    # Convert word-level span boundaries into sub-token offsets.
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # span fully inside this stride — shift it to stride-local indices
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """``datasets.map``-compatible wrapper around :func:`get_strided_contexts_and_ans`."""
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    """Append the flattened per-stride samples to ``file_name`` as JSON lines,
    dropping unanswered strides and randomly thinning 'null' samples."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
68
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0

CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a
    sequence of characters and converts each character to its Unicode codepoint.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a character) to an id (i.e. its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode codepoint (integer) to a token (str). In case it's a
        special codepoint, convert to human-readable format.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file — the "vocab" is the Unicode codepoint space.
        return ()
285
0
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    """
    Pipeline for value-guided sampling from a diffusion model trained to predict
    state-action trajectories, guided by a learned value function.
    """

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key normalization statistics; keys whose values don't support
        # mean()/std() (non-numeric entries) are silently skipped.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        """Standardize ``x_in`` with the dataset statistics for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        """Invert :meth:`normalize` for ``key``."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Recursively move ``x_in`` (dict / tensor / array-like) onto the unet's device."""
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        """Pin the state portion of the trajectory at the conditioned timesteps."""
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        """Denoise ``x`` over the scheduler's timesteps, nudging each step along the
        value function's gradient; returns the trajectories and their last values."""
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # no guidance on the last couple of (near-clean) timesteps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        """Plan from observation ``obs`` and return the first action of the
        highest-value sampled trajectory (de-normalized, as a numpy array)."""
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
281
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. 
return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
0
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase = logging.get_logger(__name__) def lowercase ( a__ : List[str] , a__ : Optional[Any] , a__ : Dict ) -> Any: return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def lowercase ( a__ : List[str] , a__ : Optional[Any] , a__ : Dict = None ) -> List[Any]: _UpperCamelCase = tesseract_config if tesseract_config is not None else '''''' # apply OCR _UpperCamelCase = to_pil_image(UpperCamelCase__ ) _UpperCamelCase , _UpperCamelCase = pil_image.size _UpperCamelCase = pytesseract.image_to_data(UpperCamelCase__ , lang=UpperCamelCase__ , output_type='''dict''' , config=UpperCamelCase__ ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _UpperCamelCase = [idx for idx, word in enumerate(UpperCamelCase__ ) if not word.strip()] _UpperCamelCase = [word for idx, word in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices] _UpperCamelCase = [coord for idx, coord in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices] _UpperCamelCase = [coord for idx, coord in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices] _UpperCamelCase = [coord for idx, coord in 
enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices] _UpperCamelCase = [coord for idx, coord in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _UpperCamelCase = [] for x, y, w, h in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): _UpperCamelCase = [x, y, x + w, y + h] actual_boxes.append(UpperCamelCase__ ) # finally, normalize the bounding boxes _UpperCamelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCAmelCase_ ( lowercase_): snake_case__ = ['''pixel_values'''] def __init__( self : Any , __UpperCamelCase : str = True , __UpperCamelCase : Any = None , __UpperCamelCase : Optional[int] = PILImageResampling.BILINEAR , __UpperCamelCase : Union[str, Any] = True , __UpperCamelCase : List[str] = None , __UpperCamelCase : int = "" , **__UpperCamelCase : Optional[Any] , ) -> Any: super().__init__(**__UpperCamelCase ) _UpperCamelCase = size if size is not None else {'''height''': 224, '''width''': 224} _UpperCamelCase = get_size_dict(__UpperCamelCase ) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = resample _UpperCamelCase = apply_ocr _UpperCamelCase = ocr_lang _UpperCamelCase = tesseract_config def _UpperCamelCase ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : int = PILImageResampling.BILINEAR , __UpperCamelCase : int = None , **__UpperCamelCase : List[Any] , ) -> List[str]: _UpperCamelCase = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) _UpperCamelCase = (size['''height'''], size['''width''']) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def _UpperCamelCase ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : int = None , __UpperCamelCase : int = None , __UpperCamelCase : Union[str, Any] = None , __UpperCamelCase : Any = None , __UpperCamelCase : List[Any] = None , __UpperCamelCase : int = None , __UpperCamelCase : int = None , __UpperCamelCase : List[Any] = ChannelDimension.FIRST , **__UpperCamelCase : str , ) -> Any: _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(__UpperCamelCase ) _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr _UpperCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang _UpperCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config _UpperCamelCase = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. 
_UpperCamelCase = [to_numpy_array(__UpperCamelCase ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) _UpperCamelCase = [] _UpperCamelCase = [] for image in images: _UpperCamelCase , _UpperCamelCase = apply_tesseract(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) words_batch.append(__UpperCamelCase ) boxes_batch.append(__UpperCamelCase ) if do_resize: _UpperCamelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) _UpperCamelCase = [flip_channel_order(__UpperCamelCase ) for image in images] _UpperCamelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] _UpperCamelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__UpperCamelCase ) if apply_ocr: _UpperCamelCase = words_batch _UpperCamelCase = boxes_batch return data
256
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case 
, snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) 
snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0
'''simple docstring''' from bisect import bisect from itertools import accumulate def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda SCREAMING_SNAKE_CASE__ : x[0] / x[1] , reverse=UpperCamelCase__ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = [i[0] for i in r], [i[1] for i in r] _SCREAMING_SNAKE_CASE : Dict = list(accumulate(UpperCamelCase__ ) ) _SCREAMING_SNAKE_CASE : str = bisect(UpperCamelCase__ , UpperCamelCase__ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
200
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
'''simple docstring''' from __future__ import annotations import numpy as np def __lowerCamelCase ( _lowercase ) -> int: UpperCAmelCase , UpperCAmelCase : Any = np.shape(UpperCamelCase__ ) if rows != columns: UpperCAmelCase : Any = ( """\'table\' has to be of square shaped array but got a """ F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) UpperCAmelCase : int = np.zeros((rows, columns) ) UpperCAmelCase : List[str] = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): UpperCAmelCase : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError("""No LU decomposition exists""" ) UpperCAmelCase : Optional[int] = (table[i][j] - total) / upper[j][j] UpperCAmelCase : List[Any] = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase : List[str] = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) UpperCAmelCase : List[str] = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
265
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) 
snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , 
snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if 
output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output 
return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
0
from typing import Dict, Optional

import numpy as np

import datasets


# NOTE(review): this module was machine-anonymised: the three constants were all
# bound to `A` and the three functions all defined as `lowercase_`, while every
# call site kept the original names (`_DESCRIPTION`, `intersect_and_union`, ...)
# and every parameter list repeated one placeholder (a SyntaxError). The names
# below are restored from those call sites so the module is importable again.

_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = datasets.load_metric(\"mean_iou\")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
"""

_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""


def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute per-class intersection/union areas between one prediction and its ground truth.

    Returns ``(area_intersect, area_union, area_pred_label, area_label)``, each an
    ``ndarray`` of shape ``(num_labels,)``.
    """
    if label_map is not None:
        # remap old label ids to new ones in place
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        # shift labels down by one; background (0) becomes the ignore value 255
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # drop pixels marked with the ignore index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate :func:`intersect_and_union` over a whole list of prediction/ground-truth pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy plus per-category arrays."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowercase(datasets.Metric):
    """`datasets.Metric` wrapper around :func:`mean_iou`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
184
class lowercase:
    """A menu item with a name, a value and a weight.

    NOTE(review): the anonymised original named all four accessors ``a`` while
    ``greedy`` below called ``get_value`` / ``get_weight``; the accessor names
    are restored from those call sites.
    """

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # value density — useful as a greedy sort key
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of items from parallel name/value/weight sequences.

    NOTE(review): the original instantiated an undefined name ``Things``; the
    class defined above (``lowercase``) is used instead.
    """
    menu = []
    for i in range(len(value)):
        menu.append(lowercase(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedy 0/1 knapsack: take items in decreasing ``key_func`` order while the
    cumulative weight stays within ``max_cost``.

    Returns ``(chosen_items, total_value)``.
    """
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


# NOTE(review): the original also contained an empty third function stub (its body
# was lost in anonymisation); it is dropped here as it did nothing.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
285
0
'''Spark-backed `datasets` builder: materialises a pyspark DataFrame as an Arrow/Parquet dataset.

NOTE(review): this chunk was machine-anonymised and is NOT runnable as written:
every parameter list repeats one placeholder name (`lowerCamelCase__` /
`__lowerCAmelCase`, a SyntaxError), local assignments all rebind `_UpperCAmelCase`
while later lines read the original variable names (`df`, `partition_order`,
`writer`, ...), `uuid.uuida()` looks like a mangled `uuid.uuid4()`, the logger is
bound to `lowerCamelCase__` but used as `logger`, and `shutil.move` is called with
no `shutil` import in sight. Comments below describe the apparent intent; every
claim about lost names is an assumption to be confirmed against the upstream
`datasets` Spark packaged module.
'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


# NOTE(review): bound to a placeholder name, but referenced later as `logger`.
lowerCamelCase__ = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
    # Optional `datasets.Features` schema for the produced dataset.
    lowerCAmelCase : Optional[datasets.Features] = None


def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , ):
    # Build a generator function that yields (key, example-dict) pairs, walking the
    # DataFrame partition by partition in the given order.
    # NOTE(review): parameters were presumably `df` and `partition_order` — the body
    # reads those names. Duplicate parameter names here are a SyntaxError as written.
    import pyspark

    def generate_fn():
        _UpperCAmelCase : Any = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
        for partition_id in partition_order:
            _UpperCAmelCase : Any = df_with_partition_id.select("*" ).where(F"""part_id = {partition_id}""" ).drop("part_id" )
            _UpperCAmelCase : Dict = partition_df.collect()
            _UpperCAmelCase : int = 0
            for row in rows:
                # key is "<partition>_<row-within-partition>"
                yield F"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1

    return generate_fn


class lowerCAmelCase__ ( _BaseExamplesIterable ):
    """Examples iterable over a Spark DataFrame, shardable/shufflable by partition."""

    def __init__( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=None , ) ->List[str]:
        '''Wrap `df`; default partition order is 0..numPartitions-1.'''
        _UpperCAmelCase : str = df
        _UpperCAmelCase : Union[str, Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        _UpperCAmelCase : List[Any] = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self : List[Any] ) ->Any:
        '''Yield (key, example) pairs from the stored generator.'''
        yield from self.generate_examples_fn()

    def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Any:
        '''Return a copy of this iterable with the partition order shuffled.'''
        _UpperCAmelCase : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(lowerCamelCase__ )
        return SparkExamplesIterable(self.df , partition_order=lowerCamelCase__ )

    def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int] ) ->Dict:
        '''Return a copy restricted to the partitions assigned to one worker.'''
        _UpperCAmelCase : Any = self.split_shard_indices_by_worker(lowerCamelCase__ , lowerCamelCase__ )
        return SparkExamplesIterable(self.df , partition_order=lowerCamelCase__ )

    @property
    def lowerCAmelCase__ ( self : List[Any] ) ->int:
        '''Number of shards = number of partitions in the configured order.'''
        return len(self.partition_order )


class lowerCAmelCase__ ( datasets.DatasetBuilder ):
    """DatasetBuilder that writes a Spark DataFrame out as Arrow/Parquet shards."""

    # NOTE(review): `SparkConfig` is undefined here — the config dataclass above was
    # renamed by the anonymiser; this was presumably its original name.
    lowerCAmelCase : Dict = SparkConfig

    def __init__( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str = None , lowerCamelCase__ : str = None , **lowerCamelCase__ : Tuple , ) ->Any:
        '''Store the DataFrame and working dir; config name is the DataFrame's semantic hash.'''
        import pyspark

        _UpperCAmelCase : List[str] = pyspark.sql.SparkSession.builder.getOrCreate()
        _UpperCAmelCase : List[Any] = df
        _UpperCAmelCase : str = working_dir
        super().__init__(
            cache_dir=lowerCamelCase__ , config_name=str(self.df.semanticHash() ) , **lowerCamelCase__ , )

    def lowerCAmelCase__ ( self : Dict ) ->str:
        '''Validate that `cache_dir` is reachable from driver and workers (NFS on multi-node).'''

        def create_cache_and_write_probe(lowerCamelCase__ : Dict ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=lowerCamelCase__ )
            # NOTE(review): `uuid.uuida()` looks like a mangled `uuid.uuid4()` — confirm.
            _UpperCAmelCase : str = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(lowerCamelCase__ , "a" )
            return [probe_file]

        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            _UpperCAmelCase : List[str] = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCamelCase__ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )

    def lowerCAmelCase__ ( self : Any ) ->Dict:
        '''Dataset info: just the configured features schema.'''
        return datasets.DatasetInfo(features=self.config.features )

    def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->Tuple:
        '''A single TRAIN split.'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Optional[int] ) ->str:
        '''Repartition the DataFrame so each partition's Arrow size stays under `max_shard_size`.'''
        import pyspark

        def get_arrow_batch_size(lowerCamelCase__ : List[Any] ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )

        _UpperCAmelCase : Optional[int] = self.df.count()
        _UpperCAmelCase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        _UpperCAmelCase : Union[str, Any] = (
            self.df.limit(lowerCamelCase__ )
            .repartition(1 )
            .mapInArrow(lowerCamelCase__ , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        _UpperCAmelCase : Any = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            _UpperCAmelCase : Optional[Any] = min(lowerCamelCase__ , int(approx_total_size / max_shard_size ) )
            _UpperCAmelCase : Optional[Any] = self.df.repartition(lowerCamelCase__ )

    def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , ) ->List[str]:
        '''Write shards on the executors; yield per-task (examples, bytes, shard) statistics.'''
        import pyspark

        _UpperCAmelCase : str = ParquetWriter if file_format == "parquet" else ArrowWriter
        _UpperCAmelCase : Dict = os.path.join(self._working_dir , os.path.basename(lowerCamelCase__ ) ) if self._working_dir else fpath
        _UpperCAmelCase : int = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        _UpperCAmelCase : int = self.config.features
        _UpperCAmelCase : Any = self._writer_batch_size
        _UpperCAmelCase : Optional[Any] = self._fs.storage_options

        def write_arrow(lowerCamelCase__ : Optional[int] ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            _UpperCAmelCase : Tuple = pyspark.TaskContext().taskAttemptId()
            _UpperCAmelCase : int = next(lowerCamelCase__ , lowerCamelCase__ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            _UpperCAmelCase : Dict = 0
            _UpperCAmelCase : str = writer_class(
                features=lowerCamelCase__ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=lowerCamelCase__ , storage_options=lowerCamelCase__ , embed_local_files=lowerCamelCase__ , )
            _UpperCAmelCase : Tuple = pa.Table.from_batches([first_batch] )
            writer.write_table(lowerCamelCase__ )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # current shard is full: finalize it, report its stats, start the next one
                    _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    _UpperCAmelCase : Union[str, Any] = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=lowerCamelCase__ , storage_options=lowerCamelCase__ , embed_local_files=lowerCamelCase__ , )
                _UpperCAmelCase : Any = pa.Table.from_batches([batch] )
                writer.write_table(lowerCamelCase__ )
            if writer._num_bytes > 0:
                _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )

        if working_fpath != fpath:
            for file in os.listdir(os.path.dirname(lowerCamelCase__ ) ):
                _UpperCAmelCase : Dict = os.path.join(os.path.dirname(lowerCamelCase__ ) , os.path.basename(lowerCamelCase__ ) )
                # NOTE(review): `shutil` is used here but never imported in this module —
                # add `import shutil` at the top of the file.
                shutil.move(lowerCamelCase__ , lowerCamelCase__ )
        _UpperCAmelCase : int = (
            self.df.mapInArrow(lowerCamelCase__ , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) ,
                pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) ,
                pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) ,
                pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) ,
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] = "arrow" , lowerCamelCase__ : str = None , lowerCamelCase__ : Dict = None , **lowerCamelCase__ : Optional[Any] , ) ->Union[str, Any]:
        '''Drive the split preparation: write shards, aggregate stats, rename to final paths.'''
        self._validate_cache_dir()
        _UpperCAmelCase : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(lowerCamelCase__ )
        _UpperCAmelCase : Any = not is_remote_filesystem(self._fs )
        _UpperCAmelCase : Tuple = os.path.join if is_local else posixpath.join
        _UpperCAmelCase : int = "-TTTTT-SSSSS-of-NNNNN"
        _UpperCAmelCase : int = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
        _UpperCAmelCase : List[Any] = path_join(self._output_dir , lowerCamelCase__ )
        _UpperCAmelCase : Tuple = 0
        _UpperCAmelCase : List[Any] = 0
        _UpperCAmelCase : Tuple = 0
        _UpperCAmelCase : str = []
        _UpperCAmelCase : str = []
        for task_id, content in self._prepare_split_single(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
            # NOTE(review): annotated tuple-unpacking below is a SyntaxError as written —
            # the anonymiser mangled a plain 4-way tuple unpack of `content`.
            (
                (
                    _UpperCAmelCase
                ) , (
                    _UpperCAmelCase
                ) , (
                    _UpperCAmelCase
                ) , (
                    _UpperCAmelCase
                ) ,
            ) : str = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(lowerCamelCase__ )
        _UpperCAmelCase : int = total_num_examples
        _UpperCAmelCase : List[str] = total_num_bytes
        # should rename everything at the end
        logger.debug(F"""Renaming {total_shards} shards.""" )
        if total_shards > 1:
            _UpperCAmelCase : Tuple = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            _UpperCAmelCase : List[Any] = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , ):
                rename(
                    lowerCamelCase__ ,
                    fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) ,
                    fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) ,
                )

            _UpperCAmelCase : int = []
            _UpperCAmelCase : Tuple = 0
            for i in range(len(lowerCamelCase__ ) ):
                _UpperCAmelCase , _UpperCAmelCase : List[Any] = task_id_and_num_shards[i]
                for shard_id in range(lowerCamelCase__ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(lowerCamelCase__ , len(lowerCamelCase__ ) ).map(lambda lowerCamelCase__ : _rename_shard(*lowerCamelCase__ ) ).collect()
        else:
            # don't use any pattern
            _UpperCAmelCase : Dict = 0
            _UpperCAmelCase : Any = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) ,
                fpath.replace(lowerCamelCase__ , "" ) , )

    def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[int] , ) ->Any:
        '''Expose the DataFrame as an examples iterable (for streaming).'''
        return SparkExamplesIterable(self.df )
234
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def __lowerCamelCase(example):
    """Tokenize one dataset example and record its characters-per-token ratio.

    NOTE(review): the anonymised original passed ``truncation=<example>`` and
    rebound a single placeholder instead of filling the output dict; restored
    to ``truncation=False`` and explicit ``output`` keys, which is what the
    ratio computation below reads.
    """
    output = {}
    output["input_ids"] = tokenizer(example['content'], truncation=False)['input_ids']
    output["ratio_char_token"] = len(example['content']) / len(output['input_ids'])
    return output


# The `ds.map(...)` call below uses the name `tokenize`; bind it explicitly
# (the anonymised original left it undefined — a NameError at runtime).
tokenize = __lowerCamelCase

parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # default to one worker per CPU core
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
0
# Tests for the Mask2Former model (modeling, common-mixin and slow integration tests).
#
# NOTE(review): this chunk was machine-anonymised and is NOT runnable as written:
# every parameter list repeats one placeholder name (`lowerCAmelCase`, a
# SyntaxError), all helper methods are named `A__` / `a` while call sites read the
# original names (`check_output_hidden_state`, `model_checkpoints`,
# `default_image_processor`, `MaskaFormerModelTester`, ...), and local
# assignments all rebind `_lowercase` while later lines read the original
# variable names. Comments below describe the apparent intent; confirm names
# against the upstream Mask2Former test module.
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel

if is_vision_available():
    from transformers import MaskaFormerImageProcessor

if is_vision_available():
    from PIL import Image


class __lowerCAmelCase :
    """Model tester: builds small configs and random inputs for Mask2Former tests."""

    def __init__( self , lowerCAmelCase , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=10 , lowerCAmelCase=3 , lowerCAmelCase=32 * 8 , lowerCAmelCase=32 * 8 , lowerCAmelCase=4 , lowerCAmelCase=64 , ) -> str:
        '''Store test hyper-parameters (batch size, image size, label/query counts, hidden dim).'''
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = is_training
        _lowercase = use_auxiliary_loss
        _lowercase = num_queries
        _lowercase = num_channels
        _lowercase = min_size
        _lowercase = max_size
        _lowercase = num_labels
        _lowercase = hidden_dim
        # NOTE(review): the second binding was presumably `mask_feature_size = hidden_dim`.
        _lowercase = hidden_dim

    def A__ ( self ) -> str:
        '''Create a config plus random pixel values, pixel mask, mask labels and class labels.'''
        _lowercase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            lowerCAmelCase )
        _lowercase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase )
        _lowercase = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase ) > 0.5
        ).float()
        _lowercase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase ) > 0.5).long()
        _lowercase = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def A__ ( self ) -> List[Any]:
        '''Build a tiny MaskaFormerConfig sized from the tester's hyper-parameters.'''
        _lowercase = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        _lowercase = self.num_queries
        _lowercase = self.num_labels
        _lowercase = [1, 1, 1, 1]
        _lowercase = self.num_channels
        _lowercase = 64
        _lowercase = 128
        _lowercase = self.hidden_dim
        _lowercase = self.hidden_dim
        _lowercase = self.hidden_dim
        return config

    def A__ ( self ) -> int:
        '''Return (config, inputs_dict) for the common test mixin.'''
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = self.prepare_config_and_inputs()
        _lowercase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
        '''Check the lengths of the three hidden-state tuples against the config.'''
        _lowercase = output.encoder_hidden_states
        _lowercase = output.pixel_decoder_hidden_states
        _lowercase = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(lowerCAmelCase ) , config.decoder_layers )

    def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> int:
        '''Run the base model and check the shapes of its outputs.'''
        with torch.no_grad():
            _lowercase = MaskaFormerModel(config=lowerCAmelCase )
            model.to(lowerCAmelCase )
            model.eval()
            _lowercase = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase )
            _lowercase = model(lowerCAmelCase , output_hidden_states=lowerCAmelCase )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            # NOTE(review): `check_output_hidden_state` is the original name of the
            # helper above (anonymised to `A__`).
            self.check_output_hidden_state(lowerCAmelCase , lowerCAmelCase )

    def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
        '''Run the segmentation head model, with and without labels, and check its outputs.'''
        _lowercase = MaskaFormerForUniversalSegmentation(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()

        def comm_check_on_output(lowerCAmelCase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _lowercase = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase )
            _lowercase = model(lowerCAmelCase )
            comm_check_on_output(lowerCAmelCase )
            _lowercase = model(
                pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase )
        comm_check_on_output(lowerCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class __lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Common model tests for Mask2Former (mixins anonymised to `lowercase_`)."""

    _a = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    _a = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    _a = False
    _a = False
    _a = False
    _a = False

    def A__ ( self ) -> Union[str, Any]:
        '''setUp: build the model tester and config tester.'''
        # NOTE(review): `MaskaFormerModelTester` is the original name of the tester
        # class above (anonymised to `__lowerCAmelCase`).
        _lowercase = MaskaFormerModelTester(self )
        _lowercase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )

    def A__ ( self ) -> Dict:
        '''Run the common config tests.'''
        self.config_tester.run_common_tests()

    def A__ ( self ) -> Tuple:
        '''Base-model forward pass with hidden states.'''
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase )

    def A__ ( self ) -> List[Any]:
        '''Segmentation-head forward pass.'''
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase )

    @unittest.skip(reason='Mask2Former does not use inputs_embeds' )
    def A__ ( self ) -> str:
        pass

    @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
    def A__ ( self ) -> str:
        pass

    @unittest.skip(reason='Mask2Former is not a generative model' )
    def A__ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='Mask2Former does not use token embeddings' )
    def A__ ( self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def A__ ( self ) -> str:
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def A__ ( self ) -> List[Any]:
        pass

    def A__ ( self ) -> Optional[Any]:
        '''Forward signature starts with `pixel_values`.'''
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(lowerCAmelCase )
            _lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase = [*signature.parameters.keys()]
            _lowercase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowerCAmelCase )

    @slow
    def A__ ( self ) -> Dict:
        '''Pretrained checkpoints load.'''
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            _lowercase = MaskaFormerModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )

    def A__ ( self ) -> Tuple:
        '''Training with mask/class labels produces a loss.'''
        _lowercase = (self.model_tester.min_size,) * 2
        _lowercase = {
            'pixel_values': torch.randn((2, 3, *size) , device=lowerCAmelCase ),
            'mask_labels': torch.randn((2, 10, *size) , device=lowerCAmelCase ),
            'class_labels': torch.zeros(2 , 10 , device=lowerCAmelCase ).long(),
        }
        _lowercase = self.model_tester.get_config()
        _lowercase = MaskaFormerForUniversalSegmentation(lowerCAmelCase ).to(lowerCAmelCase )
        _lowercase = model(**lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def A__ ( self ) -> Tuple:
        '''Base model with output_hidden_states=True.'''
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase )

    def A__ ( self ) -> Union[str, Any]:
        '''Attentions are returned when requested.'''
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(lowerCAmelCase ).to(lowerCAmelCase )
            _lowercase = model(**lowerCAmelCase , output_attentions=lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def A__ ( self ) -> Dict:
        '''Loss backward pass works in training mode.'''
        if not self.model_tester.is_training:
            return
        _lowercase = self.all_model_classes[1]
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = model_class(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.train()
        _lowercase = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase ).loss
        loss.backward()

    def A__ ( self ) -> Tuple:
        '''Gradients are retained on intermediate hidden states and attentions.'''
        _lowercase = self.all_model_classes[1]
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = True
        _lowercase = True
        _lowercase = model_class(lowerCAmelCase ).to(lowerCAmelCase )
        model.train()
        _lowercase = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase )
        _lowercase = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        _lowercase = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        _lowercase = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        _lowercase = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=lowerCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


# Tolerance for the integration-test tensor comparisons below.
lowercase_ = 1e-4


def a ( ) -> Any:
    """Load the standard COCO test fixture image."""
    _lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the pretrained small COCO-instance checkpoint."""

    @cached_property
    def A__ ( self ) -> Union[str, Any]:
        '''Checkpoint name (original property name was presumably `model_checkpoints`).'''
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def A__ ( self ) -> List[Any]:
        '''Image processor for the checkpoint (originally `default_image_processor`).'''
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def A__ ( self ) -> Any:
        '''Headless inference: compare hidden-state slices against reference values.'''
        _lowercase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase )
        _lowercase = self.default_image_processor
        _lowercase = prepare_img()
        _lowercase = image_processor(lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
        _lowercase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(lowerCAmelCase , (1, 3, 384, 384) )
        with torch.no_grad():
            _lowercase = model(**lowerCAmelCase )
        _lowercase = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
        _lowercase = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
        _lowercase = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )

    def A__ ( self ) -> List[Any]:
        '''Head inference: compare mask/class query logits against reference values.'''
        _lowercase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase ).eval()
        _lowercase = self.default_image_processor
        _lowercase = prepare_img()
        _lowercase = image_processor(lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
        _lowercase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(lowerCAmelCase , (1, 3, 384, 384) )
        with torch.no_grad():
            _lowercase = model(**lowerCAmelCase )
        # masks_queries_logits
        _lowercase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        _lowercase = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        _lowercase = torch.tensor(lowerCAmelCase ).to(lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
        # class_queries_logits
        _lowercase = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        _lowercase = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )

    def A__ ( self ) -> int:
        '''Inference with segmentation maps as labels produces a loss.'''
        _lowercase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase ).eval()
        _lowercase = self.default_image_processor
        _lowercase = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] ,
            segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,
            return_tensors='pt' , )
        _lowercase = inputs['pixel_values'].to(lowerCAmelCase )
        _lowercase = [el.to(lowerCAmelCase ) for el in inputs['mask_labels']]
        _lowercase = [el.to(lowerCAmelCase ) for el in inputs['class_labels']]
        with torch.no_grad():
            _lowercase = model(**lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
205
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
0
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset a_ = pd.read_csv( '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/''' '''position_salaries.csv''' ) a_ = dataset.iloc[:, 1:2].values a_ = dataset.iloc[:, 2].values a_ = train_test_split(X, y, test_size=0.2, random_state=0) a_ = PolynomialFeatures(degree=4) a_ = poly_reg.fit_transform(X) a_ = LinearRegression() pol_reg.fit(X_poly, y) def _a ( ) -> Any: """simple docstring""" plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color="red" ) plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color="blue" ) plt.title("Truth or Bluff (Linear Regression)" ) plt.xlabel("Position level" ) plt.ylabel("Salary" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
340
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): 
snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) 
self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = 
ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
0
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _snake_case : Tuple = True from torch.cuda.amp import autocast _snake_case : int = logging.getLogger(__name__) @dataclass class a : """simple docstring""" __UpperCAmelCase : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __UpperCAmelCase : Optional[str] = field( default=lowercase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __UpperCAmelCase : Optional[bool] = field( default=lowercase_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) __UpperCAmelCase : Optional[bool] = field( default=lowercase_ , metadata={"help": "Whether to log verbose messages or not."} , ) __UpperCAmelCase : Optional[float] = field( default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} ) __UpperCAmelCase : Optional[float] = field( default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} ) __UpperCAmelCase : Optional[float] = field( default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) __snake_case : Optional[Any] = 
logging.WARNING if model_args.verbose_logging: __snake_case : List[Any] = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): __snake_case : Optional[Any] = logging.INFO logger.setLevel(UpperCamelCase__ ) @dataclass class a : """simple docstring""" __UpperCAmelCase : str = field( default=lowercase_ , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) __UpperCAmelCase : Optional[str] = field( default=lowercase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) __UpperCAmelCase : Optional[str] = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to \'train\'" } , ) __UpperCAmelCase : Optional[str] = field( default="validation" , metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'" ) } , ) __UpperCAmelCase : Optional[str] = field( default="file" , metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to \'file\'"} , ) __UpperCAmelCase : bool = field( default=lowercase_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) __UpperCAmelCase : Optional[int] = field( default=1 , metadata={ "help": "The percentage of the train set used as validation set in case there\'s no validation split" } , ) __UpperCAmelCase : Optional[int] = field( default=lowercase_ , metadata={"help": "The number of processes to use for the preprocessing."} , ) __UpperCAmelCase : Optional[float] = field( default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) @dataclass class a : """simple docstring""" __UpperCAmelCase : WavaVecaForPreTraining __UpperCAmelCase : WavaVecaFeatureExtractor __UpperCAmelCase : Union[bool, str] = "longest" __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[int] = None def __call__( self : str , lowerCamelCase : str ) -> Union[str, Any]: # reformat list to dict and set to pytorch format __snake_case : Union[str, Any] = self.feature_extractor.pad( lowerCamelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) __snake_case : List[str] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] ) __snake_case : Union[str, Any] = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula __snake_case : int = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to( torch.long ) __snake_case : List[Any] = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to __snake_case : List[Any] = 1 __snake_case : Optional[Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] 
).bool() # sample randomly masked indices __snake_case : Any = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCamelCase , min_masks=2 , ) return batch class a (lowercase_ ): """simple docstring""" def __init__( self : str , *lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=0 , lowerCamelCase : int=1.0 , **lowerCamelCase : Tuple ) -> int: super().__init__(*lowerCamelCase , **lowerCamelCase ) __snake_case : List[str] = 0 __snake_case : Optional[int] = max_gumbel_temp __snake_case : int = min_gumbel_temp __snake_case : List[Any] = gumbel_temp_decay def __snake_case ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] ) -> Optional[int]: model.train() __snake_case : List[Any] = self._prepare_inputs(lowerCamelCase ) if self.use_amp: with autocast(): __snake_case : int = self.compute_loss(lowerCamelCase , lowerCamelCase ) else: __snake_case : int = self.compute_loss(lowerCamelCase , lowerCamelCase ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": __snake_case : Any = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __snake_case : List[str] = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']' ) if self.args.gradient_accumulation_steps > 1: __snake_case : List[str] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowerCamelCase ).backward() elif self.use_apex: with amp.scale_loss(lowerCamelCase , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowerCamelCase ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def lowerCAmelCase_ ( ): __snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses() configure_logger(UpperCamelCase__ , UpperCamelCase__ ) # Downloading and loading a dataset from the hub. 
__snake_case : List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" __snake_case : Dict = DatasetDict() __snake_case : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , ) __snake_case : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" __snake_case : str = DatasetDict() __snake_case : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , ) __snake_case : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported __snake_case : List[Any] = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=UpperCamelCase__ ) def prepare_dataset(__lowerCamelCase ): # check that all files have the correct sampling rate __snake_case , __snake_case : List[Any] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays __snake_case : Optional[Any] = datasets.map( UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names ) # filter audio files that are too long __snake_case : Optional[Any] = vectorized_datasets.filter( lambda __lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def 
normalize(__lowerCamelCase ): return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` __snake_case : Tuple = vectorized_datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 __snake_case : int = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm=\'layer\'" ) __snake_case : Any = WavaVecaForPreTraining(UpperCamelCase__ ) __snake_case : str = DataCollatorForWavaVecaPretraining(model=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) __snake_case : Any = WavaVecaPreTrainer( model=UpperCamelCase__ , data_collator=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=UpperCamelCase__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
123
from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase ( lowercase_ ): @staticmethod @abstractmethod def a ( snake_case ): raise NotImplementedError() @abstractmethod def a ( self ): raise NotImplementedError()
285
0
"""simple docstring""" import numpy as np A__ : Optional[Any] = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class lowercase__ : def __init__( self : Any ): lowerCamelCase_ : str =np.array(snake_case__ ) def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Dict ): lowerCamelCase_ , lowerCamelCase_ : Optional[int] =np.where(letter == self.SQUARE ) lowerCamelCase_ : Any =np.concatenate([indexa + 1, indexa + 1] ) return indexes def UpperCAmelCase__ ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ): lowerCamelCase_ : Union[str, Any] =self.SQUARE[indexa - 1, indexa - 1] return letter def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Dict ): lowerCamelCase_ : Optional[Any] =message.lower() lowerCamelCase_ : int =message.replace(" " , "" ) lowerCamelCase_ : Any =message.replace("j" , "i" ) lowerCamelCase_ : Dict =np.empty((2, len(snake_case__ )) ) for letter_index in range(len(snake_case__ ) ): lowerCamelCase_ : Any =self.letter_to_numbers(message[letter_index] ) lowerCamelCase_ : Dict =numbers[0] lowerCamelCase_ : str =numbers[1] lowerCamelCase_ : str =first_step.reshape(2 * len(snake_case__ ) ) lowerCamelCase_ : Tuple ="" for numbers_index in range(len(snake_case__ ) ): lowerCamelCase_ : List[str] =int(second_step[numbers_index * 2] ) lowerCamelCase_ : str =int(second_step[(numbers_index * 2) + 1] ) lowerCamelCase_ : Optional[Any] =self.numbers_to_letter(snake_case__ , snake_case__ ) lowerCamelCase_ : Optional[Any] =encoded_message + letter return encoded_message def UpperCAmelCase__ ( self : List[str] , snake_case__ : Union[str, Any] ): lowerCamelCase_ : List[Any] =message.lower() message.replace(" " , "" ) lowerCamelCase_ : Tuple =np.empty(2 * len(snake_case__ ) ) for letter_index in range(len(snake_case__ ) ): lowerCamelCase_ : 
List[Any] =self.letter_to_numbers(message[letter_index] ) lowerCamelCase_ : int =numbers[0] lowerCamelCase_ : int =numbers[1] lowerCamelCase_ : List[Any] =first_step.reshape((2, len(snake_case__ )) ) lowerCamelCase_ : Union[str, Any] ="" for numbers_index in range(len(snake_case__ ) ): lowerCamelCase_ : Optional[Any] =int(second_step[0, numbers_index] ) lowerCamelCase_ : Optional[Any] =int(second_step[1, numbers_index] ) lowerCamelCase_ : Tuple =self.numbers_to_letter(snake_case__ , snake_case__ ) lowerCamelCase_ : Union[str, Any] =decoded_message + letter return decoded_message
144
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger() @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : list = field(default_factory=lowercase_ ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(snake_case , nn.Convad ) or isinstance(snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self , snake_case ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def a ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) def __call__( self , snake_case ): snake_case_ = Tracker(self.dest )(snake_case ).parametrized snake_case_ = Tracker(self.src )(snake_case ).parametrized snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip , snake_case ) ) snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip , snake_case ) ) if len(snake_case ) != len(snake_case ): raise 
Exception( F'''Numbers of operations are different. Source module has {len(snake_case )} operations while''' F''' destination module has {len(snake_case )}.''' ) for dest_m, src_m in zip(snake_case , snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() snake_case_ = ResNetForImageClassification(UpperCamelCase__ ).eval() snake_case_ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." snake_case_ = F'''resnet{"-".join(name.split("resnet" ) )}''' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) print(F'''Pushed {checkpoint_name}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ): '''simple docstring''' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} snake_case_ 
= idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. 
If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : Optional[Any] = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): """simple docstring""" def __init__( self ) -> List[Any]: '''simple docstring''' super().__init__() A__ = nn.Linear(3 , 4 ) A__ = nn.BatchNormad(4 ) A__ = nn.Linear(4 , 5 ) def UpperCamelCase ( self , lowercase ) -> str: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(lowercase ) ) ) class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(lowercase , model.state_dict() ) A__ = os.path.join(lowercase , "index.json" ) self.assertTrue(os.path.isfile(lowercase ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: A__ = os.path.join(lowercase , F'{key}.dat' ) self.assertTrue(os.path.isfile(lowercase ) ) # TODO: add tests on the fact weights are properly loaded def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: A__ = torch.randn(2 , 3 , dtype=lowercase ) with TemporaryDirectory() as tmp_dir: A__ = offload_weight(lowercase , "weight" , lowercase , {} ) A__ = os.path.join(lowercase , "weight.dat" ) self.assertTrue(os.path.isfile(lowercase ) ) self.assertDictEqual(lowercase , {"weight": {"shape": [2, 3], "dtype": str(lowercase ).split("." 
)[1]}} ) A__ = load_offloaded_weight(lowercase , index["weight"] ) self.assertTrue(torch.equal(lowercase , lowercase ) ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = ModelForTest() A__ = model.state_dict() A__ = {k: v for k, v in state_dict.items() if "linear2" not in k} A__ = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowercase , lowercase ) A__ = OffloadedWeightsLoader(state_dict=lowercase , save_folder=lowercase ) # Every key is there with the right value self.assertEqual(sorted(lowercase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowercase , weight_map[key] ) ) A__ = {k: v for k, v in state_dict.items() if "weight" in k} A__ = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowercase , lowercase ) A__ = OffloadedWeightsLoader(state_dict=lowercase , save_folder=lowercase ) # Every key is there with the right value self.assertEqual(sorted(lowercase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowercase , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(lowercase , lowercase ) # Duplicates are removed A__ = OffloadedWeightsLoader(state_dict=lowercase , save_folder=lowercase ) # Every key is there with the right value self.assertEqual(sorted(lowercase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowercase , weight_map[key] ) ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = {"a.1": 0, "a.10": 1, "a.2": 2} A__ = extract_submodules_state_dict(lowercase , ["a.1", "a.2"] ) self.assertDictEqual(lowercase , {"a.1": 0, "a.2": 2} ) A__ = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} A__ = extract_submodules_state_dict(lowercase , ["a.1", "a.2"] ) self.assertDictEqual(lowercase , {"a.1.a": 0, "a.2.a": 2} )
68
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _UpperCAmelCase : Optional[int] = 5_0000 _UpperCAmelCase : Dict = 5000 _UpperCAmelCase , _UpperCAmelCase : Optional[int] = os.path.split(__file__) _UpperCAmelCase : List[str] = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = {'num examples': SPEED_TEST_N_EXAMPLES} snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 
'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) snake_case_ = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) snake_case_ = generate_example_dataset( os.path.join(UpperCamelCase__ , 'dataset.arrow' ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func(UpperCamelCase__ , **UpperCamelCase__ ) print('shuffling dataset' ) snake_case_ = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func( UpperCamelCase__ , **UpperCamelCase__ ) with open(UpperCamelCase__ , 'wb' ) as f: f.write(json.dumps(UpperCamelCase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
285
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer snake_case : str = logging.get_logger(__name__) snake_case : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} snake_case : Optional[Any] = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } snake_case : List[Any] = { """junnyu/roformer_chinese_small""": 1_536, """junnyu/roformer_chinese_base""": 1_536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } snake_case : Union[str, Any] = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": 
True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class _snake_case ( lowercase_ ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCamelCase__ = RoFormerTokenizer def __init__( self , _a=None , _a=None , _a=True , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a=True , _a=None , **_a , ): super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , ) __magic_name__ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("lowercase" , _a ) != do_lower_case or pre_tok_state.get("strip_accents" , _a ) != strip_accents ): __magic_name__ : Optional[Any] = getattr(_a , pre_tok_state.pop("type" ) ) __magic_name__ : List[str] = do_lower_case __magic_name__ : Dict = strip_accents __magic_name__ : List[str] = pre_tok_class(**_a ) __magic_name__ : Any = do_lower_case def __getstate__( self ): __magic_name__ : List[str] = self.__dict__.copy() __magic_name__ : Optional[int] = BertPreTokenizer() return state def __setstate__( self , _a ): __magic_name__ : Union[str, Any] = d __magic_name__ : str = self.__dict__["_tokenizer"].get_vocab() __magic_name__ : Optional[Any] = PreTokenizer.custom(JiebaPreTokenizer(_a ) ) def SCREAMING_SNAKE_CASE ( self , _a , _a=None ): __magic_name__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE ( self , _a , _a = None ): __magic_name__ : Tuple = [self.sep_token_id] __magic_name__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def 
SCREAMING_SNAKE_CASE ( self , _a , _a = None ): __magic_name__ : Dict = self._tokenizer.model.save(_a , name=_a ) return tuple(_a ) def SCREAMING_SNAKE_CASE ( self , _a , _a=None , _a=None , _a=False , **_a , ): __magic_name__ : int = BertPreTokenizer() return super().save_pretrained(_a , _a , _a , _a , **_a )
281
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case_ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: snake_case_ = max( mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , ) snake_case_ = val return f[i][j] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case_ = dp[i - 1][w_] return dp[n][w_], dp def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case_ = len(UpperCamelCase__ ) if num_items != len(UpperCamelCase__ ): snake_case_ = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(UpperCamelCase__ )} values''' ) raise ValueError(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): if not isinstance(wt[i] , UpperCamelCase__ ): snake_case_ = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase__ ) snake_case_ , snake_case_ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = set() _construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return optimal_val, example_optional_set def 
__lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: optimal_set.add(UpperCamelCase__ ) _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ ) if __name__ == "__main__": _UpperCAmelCase : Tuple = [3, 2, 4, 4] _UpperCAmelCase : Optional[Any] = [4, 3, 2, 3] _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : str = 6 _UpperCAmelCase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _UpperCAmelCase , _UpperCAmelCase : List[Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _UpperCAmelCase , _UpperCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # 
noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
256
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = 
spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a 
corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. 
snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowercase__ ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase_ ( self ): torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE : List[Any] = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def UpperCAmelCase_ ( self ): torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , ) return model @property def UpperCAmelCase_ ( self ): torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE : List[Any] = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) _SCREAMING_SNAKE_CASE : List[str] = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, 
unet @slow def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : str = """cpu""" # ensure determinism for the device-dependent torch.Generator _SCREAMING_SNAKE_CASE : str = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _SCREAMING_SNAKE_CASE : Any = DDPMScheduler() _SCREAMING_SNAKE_CASE : Optional[int] = AudioDiffusionPipeline(vqvae=__snake_case , unet=self.dummy_unet , mel=__snake_case , scheduler=__snake_case ) _SCREAMING_SNAKE_CASE : Optional[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(42 ) _SCREAMING_SNAKE_CASE : List[Any] = pipe(generator=__snake_case , steps=4 ) _SCREAMING_SNAKE_CASE : int = output.audios[0] _SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0] _SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=__snake_case ).manual_seed(42 ) _SCREAMING_SNAKE_CASE : int = pipe(generator=__snake_case , steps=4 , return_dict=__snake_case ) _SCREAMING_SNAKE_CASE : int = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _SCREAMING_SNAKE_CASE : List[str] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _SCREAMING_SNAKE_CASE : Union[str, Any] = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10] _SCREAMING_SNAKE_CASE : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _SCREAMING_SNAKE_CASE : str = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler() _SCREAMING_SNAKE_CASE : str = 
self.dummy_vqvae_and_unet _SCREAMING_SNAKE_CASE : Dict = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__snake_case , scheduler=__snake_case ) _SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) np.random.seed(0 ) _SCREAMING_SNAKE_CASE : str = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _SCREAMING_SNAKE_CASE : int = torch.Generator(device=__snake_case ).manual_seed(42 ) _SCREAMING_SNAKE_CASE : Optional[int] = pipe(raw_audio=__snake_case , generator=__snake_case , start_step=5 , steps=10 ) _SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _SCREAMING_SNAKE_CASE : List[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _SCREAMING_SNAKE_CASE : List[Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_unet_condition _SCREAMING_SNAKE_CASE : str = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=__snake_case , mel=__snake_case , scheduler=__snake_case ) _SCREAMING_SNAKE_CASE : Any = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) np.random.seed(0 ) _SCREAMING_SNAKE_CASE : Tuple = torch.rand((1, 1, 10) ) _SCREAMING_SNAKE_CASE : int = pipe(generator=__snake_case , encoding=__snake_case ) _SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] _SCREAMING_SNAKE_CASE : int = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): 
'''simple docstring''' def UpperCAmelCase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Optional[Any] = torch_device _SCREAMING_SNAKE_CASE : str = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) _SCREAMING_SNAKE_CASE : Dict = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(42 ) _SCREAMING_SNAKE_CASE : Optional[Any] = pipe(generator=__snake_case ) _SCREAMING_SNAKE_CASE : Optional[Any] = output.audios[0] _SCREAMING_SNAKE_CASE : int = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _SCREAMING_SNAKE_CASE : Tuple = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _SCREAMING_SNAKE_CASE : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
200
import os import numpy import onnx def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = os.path.dirname(UpperCamelCase__ ) snake_case_ = os.path.basename(UpperCamelCase__ ) snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() 
snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCamelCase__ ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
285
0
'''simple docstring''' import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Optional[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } a : Dict = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]: for attribute in key.split(""".""" ): UpperCAmelCase : Optional[int] = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: UpperCAmelCase : List[str] = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: UpperCAmelCase : List[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase : List[str] = value elif weight_type == "weight_g": UpperCAmelCase : str = value elif weight_type == "weight_v": UpperCAmelCase : int = value elif weight_type == "bias": UpperCAmelCase : Optional[Any] = value else: UpperCAmelCase : List[Any] = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]: UpperCAmelCase : List[Any] = [] UpperCAmelCase : Optional[Any] = fairseq_model.state_dict() UpperCAmelCase : Dict = hf_model.feature_extractor UpperCAmelCase : int = hf_model.adapter for name, value in fairseq_dict.items(): UpperCAmelCase : int = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase : Any = True elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ): load_adapter(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) UpperCAmelCase : Any = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase : Optional[Any] = True if "*" in mapped_key: UpperCAmelCase : int = name.split(UpperCamelCase__ )[0].split(""".""" )[-2] UpperCAmelCase : List[str] = mapped_key.replace("""*""" , UpperCamelCase__ ) if "weight_g" in name: UpperCAmelCase : List[str] = """weight_g""" elif "weight_v" in name: UpperCAmelCase : Dict = """weight_v""" elif "bias" in name: UpperCAmelCase : Union[str, Any] = """bias""" elif "weight" in name: UpperCAmelCase : Optional[int] = """weight""" else: UpperCAmelCase : str = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]: UpperCAmelCase : Tuple = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase : Tuple = name.split(""".""" ) UpperCAmelCase : Dict = int(items[0] ) UpperCAmelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) UpperCAmelCase : Union[str, Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase__ ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = full_name.split("""adaptor.""" )[-1] UpperCAmelCase : int = name.split(""".""" ) if items[1].isdigit(): UpperCAmelCase : Optional[Any] = int(items[1] ) else: UpperCAmelCase : Optional[Any] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' UpperCAmelCase : List[str] = value logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' UpperCAmelCase : Any = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' UpperCAmelCase : Tuple = value logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' UpperCAmelCase : 
List[Any] = value logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' UpperCAmelCase : List[Any] = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' UpperCAmelCase : Dict = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase__ ) def __lowerCamelCase ( _lowercase ) -> int: UpperCAmelCase , UpperCAmelCase : str = emb.weight.shape UpperCAmelCase : List[Any] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ ) UpperCAmelCase : List[Any] = emb.weight.data return lin_layer @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[Any]: UpperCAmelCase : List[str] = WavaVecaConfig.from_pretrained( UpperCamelCase__ , add_adapter=UpperCamelCase__ , adapter_stride=UpperCamelCase__ , adapter_kernel_size=UpperCamelCase__ , use_auth_token=UpperCamelCase__ , output_hidden_size=UpperCamelCase__ , ) UpperCAmelCase : Tuple = MBartConfig.from_pretrained(UpperCamelCase__ ) # load model UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ """config_yaml""": config_yaml_path, """data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path, """load_pretrained_decoder_from""": None, } , ) 
UpperCAmelCase : Union[str, Any] = model[0].eval() # load feature extractor UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase__ , use_auth_token=UpperCamelCase__ ) # set weights for wav2vec2 encoder UpperCAmelCase : List[str] = WavaVecaModel(UpperCamelCase__ ) recursively_load_weights_wavaveca(model.encoder , UpperCamelCase__ ) # load decoder weights UpperCAmelCase : Optional[Any] = MBartForCausalLM(UpperCamelCase__ ) UpperCAmelCase , UpperCAmelCase : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase__ ) logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) UpperCAmelCase : Optional[int] = SpeechEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) UpperCAmelCase : List[Any] = False UpperCAmelCase : Optional[int] = MBartaaTokenizer(UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) UpperCAmelCase : Dict = hf_wavavec.config.to_dict() UpperCAmelCase : Tuple = tokenizer.pad_token_id UpperCAmelCase : int = tokenizer.bos_token_id UpperCAmelCase : List[str] = tokenizer.eos_token_id UpperCAmelCase : Optional[int] = """mbart50""" UpperCAmelCase : Optional[Any] = """wav2vec2""" UpperCAmelCase : int = tokenizer.eos_token_id UpperCAmelCase : Tuple = 2_5_0_0_0_4 UpperCAmelCase : Tuple = tokenizer.eos_token_id UpperCAmelCase : int = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase__ ) hf_wavavec.save_pretrained(UpperCamelCase__ ) feature_extractor.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": a : Dict = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") 
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-xls-r-1b""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/mbart-large-50-one-to-many-mmt""", type=str, help="""Path to hf decoder checkpoint config""", ) parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""") parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""") parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""") parser.add_argument("""--encoder_output_dim""", default=1_0_2_4, type=int, help="""encoder output dim""") parser.add_argument("""--start_token_id""", default=2_5_0_0_0_4, type=int, help="""`decoder_start_token_id` of model config""") a : Optional[int] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
265
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
0
import socket def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : List[str] = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) lowerCamelCase__ : Optional[int] = socket.gethostname() lowerCamelCase__ : Optional[Any] = 12312 sock.connect((host, port) ) sock.send(B"Hello server!" ) with open("Received_file" , "wb" ) as out_file: print("File opened" ) print("Receiving data..." ) while True: lowerCamelCase__ : Union[str, Any] = sock.recv(1024 ) if not data: break out_file.write(UpperCamelCase__ ) print("Successfully received the file" ) sock.close() print("Connection closed" ) if __name__ == "__main__": main()
184
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
'''simple docstring''' import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCAmelCase__ ( lowercase_ ): def __init__( self : List[str] , *lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Dict ) ->Tuple: '''simple docstring''' super().__init__(*lowerCamelCase__ , **lowerCamelCase__ ) _UpperCAmelCase : str = eval_examples _UpperCAmelCase : Union[str, Any] = post_process_function def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str = None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Tuple = None , lowerCamelCase__ : Tuple = "eval" , **lowerCamelCase__ : int , ) ->Dict: '''simple docstring''' _UpperCAmelCase : str = gen_kwargs.copy() _UpperCAmelCase : Optional[Any] = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) _UpperCAmelCase : List[str] = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) _UpperCAmelCase : Tuple = gen_kwargs _UpperCAmelCase : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset _UpperCAmelCase : Tuple = self.get_eval_dataloader(lowerCamelCase__ ) _UpperCAmelCase : Any = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
_UpperCAmelCase : Dict = self.compute_metrics _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Union[str, Any] = time.time() _UpperCAmelCase : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _UpperCAmelCase : Dict = eval_loop( lowerCamelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase__ , metric_key_prefix=lowerCamelCase__ , ) finally: _UpperCAmelCase : Any = compute_metrics _UpperCAmelCase : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( lowerCamelCase__ , lowerCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _UpperCAmelCase : Any = self.post_process_function(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) _UpperCAmelCase : List[str] = self.compute_metrics(lowerCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): _UpperCAmelCase : str = metrics.pop(lowerCamelCase__ ) metrics.update(output.metrics ) else: _UpperCAmelCase : Union[str, Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCamelCase__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) _UpperCAmelCase : Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase__ ) return metrics def lowerCAmelCase__ ( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : str=None , lowerCamelCase__ : Tuple = "test" , **lowerCamelCase__ : Tuple ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : int = gen_kwargs.copy() _UpperCAmelCase : Tuple = self.get_test_dataloader(lowerCamelCase__ ) # Temporarily disable metric computation, we will do it in the loop here. _UpperCAmelCase : Any = self.compute_metrics _UpperCAmelCase : Union[str, Any] = None _UpperCAmelCase : List[Any] = time.time() _UpperCAmelCase : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _UpperCAmelCase : List[str] = eval_loop( lowerCamelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase__ , metric_key_prefix=lowerCamelCase__ , ) finally: _UpperCAmelCase : Dict = compute_metrics _UpperCAmelCase : List[str] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( lowerCamelCase__ , lowerCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _UpperCAmelCase : List[Any] = self.post_process_function(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , "predict" ) _UpperCAmelCase : str = self.compute_metrics(lowerCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): _UpperCAmelCase : str = metrics.pop(lowerCamelCase__ ) 
metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase__ )
234
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters lowercase_ = (7_2_0, 1_2_8_0) # Height, Width lowercase_ = (0.4, 0.6) # if height or width lower than this scale, drop it. lowercase_ = 1 / 1_0_0 lowercase_ = """""" lowercase_ = """""" lowercase_ = """""" lowercase_ = 2_5_0 def a ( ) -> Dict: """simple docstring""" _lowercase , _lowercase =get_dataset(UpperCamelCase__ , UpperCamelCase__ ) for index in range(UpperCamelCase__ ): _lowercase =random.sample(range(len(UpperCamelCase__ ) ) , 4 ) _lowercase , _lowercase , _lowercase =update_image_and_anno( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , filter_scale=UpperCamelCase__ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowercase =random_chars(32 ) _lowercase =path.split(os.sep )[-1].rsplit('.' , 1 )[0] _lowercase =F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(F'''{file_root}.jpg''' , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) _lowercase =[] for anno in new_annos: _lowercase =anno[3] - anno[1] _lowercase =anno[4] - anno[2] _lowercase =anno[1] + width / 2 _lowercase =anno[2] + height / 2 _lowercase =F'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(UpperCamelCase__ ) with open(F'''{file_root}.txt''' , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def a ( A__ : Optional[int] , A__ : Any ) -> Any: """simple docstring""" _lowercase =[] _lowercase =[] for label_file in glob.glob(os.path.join(UpperCamelCase__ , '*.txt' ) ): _lowercase =label_file.split(os.sep )[-1].rsplit('.' 
, 1 )[0] with open(UpperCamelCase__ ) as in_file: _lowercase =in_file.readlines() _lowercase =os.path.join(UpperCamelCase__ , F'''{label_name}.jpg''' ) _lowercase =[] for obj_list in obj_lists: _lowercase =obj_list.rstrip('\n' ).split(' ' ) _lowercase =float(obj[1] ) - float(obj[3] ) / 2 _lowercase =float(obj[2] ) - float(obj[4] ) / 2 _lowercase =float(obj[1] ) + float(obj[3] ) / 2 _lowercase =float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(UpperCamelCase__ ) labels.append(UpperCamelCase__ ) return img_paths, labels def a ( A__ : Tuple , A__ : List[str] , A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[Any] , A__ : Optional[Any] = 0.0 , ) -> int: """simple docstring""" _lowercase =np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) _lowercase =scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowercase =scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowercase =int(scale_x * output_size[1] ) _lowercase =int(scale_y * output_size[0] ) _lowercase =[] _lowercase =[] for i, index in enumerate(UpperCamelCase__ ): _lowercase =all_img_list[index] path_list.append(UpperCamelCase__ ) _lowercase =all_annos[index] _lowercase =cva.imread(UpperCamelCase__ ) if i == 0: # top-left _lowercase =cva.resize(UpperCamelCase__ , (divid_point_x, divid_point_y) ) _lowercase =img for bbox in img_annos: _lowercase =bbox[1] * scale_x _lowercase =bbox[2] * scale_y _lowercase =bbox[3] * scale_x _lowercase =bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right _lowercase =cva.resize(UpperCamelCase__ , (output_size[1] - divid_point_x, divid_point_y) ) _lowercase =img for bbox in img_annos: _lowercase =scale_x + bbox[1] * (1 - scale_x) _lowercase =bbox[2] * scale_y _lowercase =scale_x + bbox[3] * (1 - scale_x) _lowercase =bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # 
bottom-left _lowercase =cva.resize(UpperCamelCase__ , (divid_point_x, output_size[0] - divid_point_y) ) _lowercase =img for bbox in img_annos: _lowercase =bbox[1] * scale_x _lowercase =scale_y + bbox[2] * (1 - scale_y) _lowercase =bbox[3] * scale_x _lowercase =scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right _lowercase =cva.resize( UpperCamelCase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) _lowercase =img for bbox in img_annos: _lowercase =scale_x + bbox[1] * (1 - scale_x) _lowercase =scale_y + bbox[2] * (1 - scale_y) _lowercase =scale_x + bbox[3] * (1 - scale_x) _lowercase =scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: _lowercase =[ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def a ( A__ : Dict ) -> Union[str, Any]: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _lowercase =ascii_lowercase + digits return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) ) if __name__ == "__main__": main() print('DONE ✅')
205
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 
0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
def _a ( UpperCamelCase_ : int ) -> int: """simple docstring""" assert column_title.isupper() lowerCAmelCase__ = 0 lowerCAmelCase__ = len(UpperCamelCase__ ) - 1 lowerCAmelCase__ = 0 while index >= 0: lowerCAmelCase__ = (ord(column_title[index] ) - 64) * pow(26 , UpperCamelCase__ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
340
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , 
splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 
'README.md' ) )
285
0
from ..utils import DummyObject, requires_backends class a (metaclass=lowercase_ ): """simple docstring""" __UpperCAmelCase : Optional[Any] = ['''transformers''', '''torch''', '''note_seq'''] def __init__( self : str , *lowerCamelCase : Any , **lowerCamelCase : Tuple ) -> Tuple: requires_backends(self , ["transformers", "torch", "note_seq"] ) @classmethod def __snake_case ( cls : Any , *lowerCamelCase : Dict , **lowerCamelCase : Optional[Any] ) -> List[str]: requires_backends(cls , ["transformers", "torch", "note_seq"] ) @classmethod def __snake_case ( cls : Any , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[Any] ) -> Dict: requires_backends(cls , ["transformers", "torch", "note_seq"] )
123
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a 
simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. 
This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
0
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A__ : List[Any] = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ A__ : Optional[Any] = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. 
This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ A__ : int = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCAmelCase__ ( self : Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any]=4 , snake_case__ : Tuple=False ): lowerCamelCase_ : str =compute_bleu( reference_corpus=snake_case__ , translation_corpus=snake_case__ , max_order=snake_case__ , smooth=snake_case__ ) ((lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_)) : List[Any] =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
144
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCamelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=UpperCamelCase__ ) if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images['url'] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser('' , add_help=UpperCamelCase__ ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , 
required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ ) return parser.parse_args() if __name__ == "__main__": _UpperCAmelCase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) set_seed(7_7_0) lowerCAmelCase__ = { """c_attn""": """att_proj""", """c_proj""": """out_proj""", """c_fc""": """in_proj""", """transformer.""": """""", """h.""": """layers.""", """ln_1""": """layernorm_1""", """ln_2""": """layernorm_2""", """ln_f""": """layernorm_final""", """wpe""": """position_embeds_layer""", """wte""": """input_embeds_layer""", } lowerCAmelCase__ = { """text_small""": { """repo_id""": """suno/bark""", """file_name""": """text.pt""", }, """coarse_small""": { """repo_id""": """suno/bark""", """file_name""": """coarse.pt""", }, """fine_small""": { """repo_id""": """suno/bark""", """file_name""": """fine.pt""", }, """text""": { """repo_id""": """suno/bark""", """file_name""": """text_2.pt""", }, """coarse""": { """repo_id""": """suno/bark""", """file_name""": """coarse_2.pt""", }, """fine""": { """repo_id""": """suno/bark""", """file_name""": """fine_2.pt""", }, } lowerCAmelCase__ = os.path.dirname(os.path.abspath(__file__)) lowerCAmelCase__ = os.path.join(os.path.expanduser("""~"""), """.cache""") lowerCAmelCase__ = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: 
Any=False ) -> Union[str, Any]: '''simple docstring''' A__ = model_type if use_small: key += "_small" return os.path.join(UpperCamelCase__ , REMOTE_MODEL_PATHS[key]["file_name"] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Optional[int]: '''simple docstring''' os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) hf_hub_download(repo_id=UpperCamelCase__ , filename=UpperCamelCase__ , local_dir=UpperCamelCase__ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: str=False , SCREAMING_SNAKE_CASE_: int="text" ) -> str: '''simple docstring''' if model_type == "text": A__ = BarkSemanticModel A__ = BarkSemanticConfig A__ = BarkSemanticGenerationConfig elif model_type == "coarse": A__ = BarkCoarseModel A__ = BarkCoarseConfig A__ = BarkCoarseGenerationConfig elif model_type == "fine": A__ = BarkFineModel A__ = BarkFineConfig A__ = BarkFineGenerationConfig else: raise NotImplementedError() A__ = F'{model_type}_small' if use_small else model_type A__ = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(UpperCamelCase__ ): logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' ) _download(model_info["repo_id"] , model_info["file_name"] ) A__ = torch.load(UpperCamelCase__ , map_location=UpperCamelCase__ ) # this is a hack A__ = checkpoint["model_args"] if "input_vocab_size" not in model_args: A__ = model_args["vocab_size"] A__ = model_args["vocab_size"] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments A__ = model_args.pop("n_head" ) A__ = model_args.pop("n_embd" ) A__ = model_args.pop("n_layer" ) A__ = ConfigClass(**checkpoint["model_args"] ) A__ = ModelClass(config=UpperCamelCase__ ) A__ = GenerationConfigClass() A__ = model_generation_config A__ = checkpoint["model"] # fixup checkpoint A__ = "_orig_mod." 
for k, v in list(state_dict.items() ): if k.startswith(UpperCamelCase__ ): # replace part of the key with corresponding layer name in HF implementation A__ = k[len(UpperCamelCase__ ) :] for old_layer_name in new_layer_name_dict: A__ = new_k.replace(UpperCamelCase__ , new_layer_name_dict[old_layer_name] ) A__ = state_dict.pop(UpperCamelCase__ ) A__ = set(state_dict.keys() ) - set(model.state_dict().keys() ) A__ = {k for k in extra_keys if not k.endswith(".attn.bias" )} A__ = set(model.state_dict().keys() ) - set(state_dict.keys() ) A__ = {k for k in missing_keys if not k.endswith(".attn.bias" )} if len(UpperCamelCase__ ) != 0: raise ValueError(F'extra keys found: {extra_keys}' ) if len(UpperCamelCase__ ) != 0: raise ValueError(F'missing keys: {missing_keys}' ) model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) A__ = model.num_parameters(exclude_embeddings=UpperCamelCase__ ) A__ = checkpoint["best_val_loss"].item() logger.info(F'model loaded: {round(n_params/1e6 , 1 )}M params, {round(UpperCamelCase__ , 3 )} loss' ) model.eval() model.to(UpperCamelCase__ ) del checkpoint, state_dict return model def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Union[str, Any]=False , SCREAMING_SNAKE_CASE_: Union[str, Any]="text" ) -> int: '''simple docstring''' if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() A__ = "cpu" # do conversion on cpu A__ = _get_ckpt_path(UpperCamelCase__ , use_small=UpperCamelCase__ ) A__ = _load_model(UpperCamelCase__ , UpperCamelCase__ , model_type=UpperCamelCase__ , use_small=UpperCamelCase__ ) # load bark initial model A__ = _bark_load_model(UpperCamelCase__ , "cpu" , model_type=UpperCamelCase__ , use_small=UpperCamelCase__ ) if model_type == "text": A__ = bark_model["model"] if model.num_parameters(exclude_embeddings=UpperCamelCase__ ) != bark_model.get_num_params(): raise ValueError("initial and new models don\'t have the same number of parameters" ) # check if same output as 
the bark model A__ = 5 A__ = 1_0 if model_type in ["text", "coarse"]: A__ = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int ) A__ = bark_model(UpperCamelCase__ )[0] A__ = model(UpperCamelCase__ ) # take last logits A__ = output_new_model_total.logits[:, [-1], :] else: A__ = 3 A__ = 8 A__ = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) A__ = model(UpperCamelCase__ , UpperCamelCase__ ) A__ = bark_model(UpperCamelCase__ , UpperCamelCase__ ) A__ = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("initial and new outputs don\'t have the same shape" ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError("initial and new outputs are not equal" ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , ) -> List[Any]: '''simple docstring''' A__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) A__ = BarkSemanticConfig.from_pretrained(os.path.join(UpperCamelCase__ , "config.json" ) ) A__ = BarkCoarseConfig.from_pretrained(os.path.join(UpperCamelCase__ , "config.json" ) ) A__ = BarkFineConfig.from_pretrained(os.path.join(UpperCamelCase__ , "config.json" ) ) A__ = EncodecConfig.from_pretrained("facebook/encodec_24khz" ) A__ = BarkSemanticModel.from_pretrained(UpperCamelCase__ ) A__ = BarkCoarseModel.from_pretrained(UpperCamelCase__ ) A__ = BarkFineModel.from_pretrained(UpperCamelCase__ ) A__ = EncodecModel.from_pretrained("facebook/encodec_24khz" ) A__ = BarkConfig.from_sub_model_configs( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) A__ = 
BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) A__ = BarkModel(UpperCamelCase__ ) A__ = semantic A__ = coarseAcoustic A__ = fineAcoustic A__ = codec A__ = bark_generation_config Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) bark.save_pretrained(UpperCamelCase__ , repo_id=UpperCamelCase__ , push_to_hub=UpperCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") lowerCAmelCase__ = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
68
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. 
_UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=False , snake_case=2048 , **snake_case , ): snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , model_max_length=snake_case , **snake_case , ) # Creates a mapping for looking up the IDs of special symbols. snake_case_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
snake_case_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case_ = UNICODE_VOCAB_SIZE snake_case_ = len(self._special_codepoints ) @property def a ( self ): return self._unicode_vocab_size def a ( self , snake_case ): return list(snake_case ) def a ( self , snake_case ): try: return ord(snake_case ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def a ( self , snake_case ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def a ( self , snake_case ): return "".join(snake_case ) def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def a ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ = [1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case )) + [1] return result def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def a ( self , snake_case , snake_case = None ): return ()
285
0
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def lowerCAmelCase_ ( _snake_case : Dict ) -> int: '''simple docstring''' def is_in_circle(_snake_case : Any , _snake_case : List[str] ) -> bool: __magic_name__ : str = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle __magic_name__ : int = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(UpperCamelCase__ ) ) # The ratio of the area for circle to square is pi/4. __magic_name__ : int = proportion * 4 print(F'''The estimated value of pi is {pi_estimate}''' ) print(F'''The numpy value of pi is {pi}''' ) print(F'''The total error is {abs(pi - pi_estimate )}''' ) def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[Any] = 0.0 , _snake_case : int = 1.0 , ) -> Optional[int]: '''simple docstring''' return mean( function_to_integrate(uniform(UpperCamelCase__ , UpperCamelCase__ ) ) for _ in range(UpperCamelCase__ ) ) * (max_value - min_value) def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Union[str, Any] = 0.0 , _snake_case : Union[str, Any] = 1.0 ) -> Dict: '''simple docstring''' def identity_function(_snake_case : List[str] ) -> float: return x __magic_name__ : Optional[int] = area_under_curve_estimator( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ : str = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {expected_value}''' ) print(F'''Total error is {abs(estimated_value - expected_value )}''' ) print("******************" ) def lowerCAmelCase_ ( _snake_case : 
Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' def function_to_integrate(_snake_case : Tuple ) -> float: return sqrt(4.0 - x * x ) __magic_name__ : Any = area_under_curve_estimator( UpperCamelCase__ , UpperCamelCase__ , 0.0 , 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {pi}''' ) print(F'''Total error is {abs(estimated_value - pi )}''' ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
281
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. 
return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
0
"""simple docstring""" import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCAmelCase_ : def __init__( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[str]=3 , __UpperCamelCase : Union[str, Any]=32 , __UpperCamelCase : Optional[Any]=3 , __UpperCamelCase : Any=10 , __UpperCamelCase : Dict=[8, 16, 32, 64] , __UpperCamelCase : Optional[Any]=[1, 1, 2, 1] , __UpperCamelCase : int=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any="relu" , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __UpperCamelCase : int=[2, 3, 4] , __UpperCamelCase : List[str]=1 , ) -> Tuple: _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = image_size _UpperCamelCase = num_channels _UpperCamelCase = embeddings_size _UpperCamelCase = hidden_sizes _UpperCamelCase = depths _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_act _UpperCamelCase = num_labels _UpperCamelCase = scope _UpperCamelCase = len(__UpperCamelCase ) _UpperCamelCase = out_features _UpperCamelCase = out_indices _UpperCamelCase = num_groups def _UpperCamelCase ( self : str ) -> Tuple: _UpperCamelCase = floats_tensor([self.batch_size, 
self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCamelCase = self.get_config() return config, pixel_values, labels def _UpperCamelCase ( self : List[str] ) -> Optional[Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _UpperCamelCase ( self : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ) -> Any: _UpperCamelCase = BitModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _UpperCamelCase ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ) -> Union[str, Any]: _UpperCamelCase = self.num_labels _UpperCamelCase = BitForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ) -> Optional[Any]: _UpperCamelCase = BitBackbone(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _UpperCamelCase = None _UpperCamelCase = BitBackbone(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCamelCase = model(__UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _UpperCamelCase ( self : Dict ) -> str: _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( lowercase_ , lowercase_ , unittest.TestCase): snake_case__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () snake_case__ = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) snake_case__ = False snake_case__ = False snake_case__ = False snake_case__ = False snake_case__ = False def _UpperCamelCase ( self : List[str] ) -> str: _UpperCamelCase = BitModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase ) def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def _UpperCamelCase ( self : List[Any] ) -> Tuple: return @unittest.skip(reason='''Bit does not output attentions''' ) def _UpperCamelCase ( self : Any ) -> Optional[Any]: pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _UpperCamelCase ( self : Dict ) -> List[str]: pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _UpperCamelCase ( self : Any ) -> Tuple: pass def _UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__UpperCamelCase ) _UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def _UpperCamelCase ( self : List[Any] ) -> str: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCamelCase ( self : Optional[int] ) -> str: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__UpperCamelCase ) def _UpperCamelCase ( self : Optional[int] ) -> Tuple: _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(config=__UpperCamelCase ) for name, module in model.named_modules(): if isinstance(__UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def _UpperCamelCase ( 
self : Tuple ) -> Optional[int]: def check_hidden_states_output(__UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ): _UpperCamelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCamelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCamelCase = layer_type _UpperCamelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: pass def _UpperCamelCase ( self : Dict ) -> Union[str, Any]: _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def _UpperCamelCase ( self : Tuple ) -> Any: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = BitModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase ( ) -> Dict: _UpperCamelCase = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _UpperCamelCase ( self : List[str] ) -> Tuple: _UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCamelCase ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCamelCase = model(**__UpperCamelCase ) # verify the logits _UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) ) @require_torch class UpperCAmelCase_ ( lowercase_ , unittest.TestCase): snake_case__ = (BitBackbone,) if is_torch_available() else () snake_case__ = BitConfig snake_case__ = False def _UpperCamelCase ( self : List[str] ) -> Dict: _UpperCamelCase = BitModelTester(self )
256
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case 
, snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) 
snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ : Optional[int] = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : int = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
200
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
'''simple docstring''' import numpy class UpperCamelCase_ : def __init__( self , A , A ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. UpperCAmelCase : List[Any] = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. UpperCAmelCase : Union[str, Any] = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. UpperCAmelCase : Any = numpy.random.rand(3 , 1 ) # Real output values provided. UpperCAmelCase : Dict = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. UpperCAmelCase : str = numpy.zeros(output_array.shape ) def _lowercase( self ) -> Tuple: UpperCAmelCase : List[Any] = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. UpperCAmelCase : List[Any] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. 
UpperCAmelCase : str = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def _lowercase( self ) -> List[Any]: UpperCAmelCase : Dict = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) UpperCAmelCase : Optional[Any] = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) UpperCAmelCase : Tuple = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def _lowercase( self , A , A , A ) -> List[Any]: for iteration in range(1 , iterations + 1 ): UpperCAmelCase : str = self.feedforward() self.back_propagation() if give_loss: UpperCAmelCase : int = numpy.mean(numpy.square(output - self.feedforward() ) ) print(f'''Iteration {iteration} Loss: {loss}''' ) def _lowercase( self , A ) -> Tuple: UpperCAmelCase : Any = input_arr UpperCAmelCase : Dict = 
sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) UpperCAmelCase : int = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) UpperCAmelCase : List[Any] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def __lowerCamelCase ( _lowercase ) -> Tuple: return 1 / (1 + numpy.exp(-value )) def __lowerCamelCase ( _lowercase ) -> Optional[Any]: return (value) * (1 - (value)) def __lowerCamelCase ( ) -> Optional[Any]: UpperCAmelCase : str = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. UpperCAmelCase : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. UpperCAmelCase : Any = TwoHiddenLayerNeuralNetwork( input_array=UpperCamelCase__ , output_array=UpperCamelCase__ ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=UpperCamelCase__ , iterations=1_0 , give_loss=UpperCamelCase__ ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
265
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig

logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, the basic ResNet building block."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet stem: a 7x7 stride-2 convolution followed by 3x3 max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """1x1 strided convolution + batch norm used to project the residual branch."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """Classic residual layer: two 3x3 convolutions plus a (possibly projected) shortcut."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # Project the shortcut whenever the shape changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual layer: 1x1 reduce -> 3x3 -> 1x1 expand, with shortcut.

    The first 1x1 convolution reduces the channels by ``reduction``.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A stack of ``depth`` residual layers; the first one downsamples with ``stride``."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    """The sequence of ResNet stages producing the hidden states."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """Handles weight initialization and the pretrained-model download interface."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): original target class was mangled; checkpointing is
        # toggled on the encoder, matching upstream — confirm against callers.
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from labels dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        r"""
        Returns:
            The feature maps of the stages selected by `config.out_features`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
285
0
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    """Convert a byte count to whole mebibytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that measures CUDA memory allocated inside the block.

    On exit, exposes ``used`` (end - begin) and ``peaked`` (peak - begin),
    both in MiB, plus the raw ``begin``/``end``/``peak`` byte counters.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Build MRPC train/eval dataloaders truncated to ``n_train``/``n_val`` rows."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # Rename 'label' to 'labels', the name the models expect.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train for ``config['num_epochs']`` epochs while tracking peak GPU memory."""
    accelerator = Accelerator()

    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Use the DeepSpeed-provided dummy optimizer/scheduler when the DeepSpeed
    # config already defines them, otherwise real AdamW + linear warmup.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Unpack in the same order the objects were passed to prepare().
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    overall_step = 0
    starting_epoch = 0

    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Report allocated/peak memory for this epoch.
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
184
class Things:
    """An item with a name, a value and a weight, as used by the greedy knapsack."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # Value density: the ranking criterion for the greedy heuristic.
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of Things from parallel name/value/weight sequences."""
    return [Things(name[i], value[i], weight[i]) for i in range(len(name))]


def greedy(items, max_cost, key_func):
    """Greedily pick items in decreasing ``key_func`` order while they still fit.

    Returns a tuple ``(chosen_items, total_value)``. Note that the greedy
    strategy is a heuristic and is not guaranteed to be optimal.
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for item in items_copy:
        if (total_cost + item.get_weight()) <= max_cost:
            result.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return (result, total_value)


def test_greedy():
    """
    >>> menu = build_menu(["a", "b"], [50, 30], [5, 6])
    >>> _, total = greedy(menu, 10, Things.get_value)
    >>> total
    50.0
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
285
0
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets into a single one.

    All elements must be either map-style ``Dataset`` or ``IterableDataset``
    objects (not mixed, not dataset dictionaries).

    Raises:
        ValueError: if ``datasets`` is empty, mixes dataset kinds, contains a
            (possibly empty) dataset dictionary, or if ``stopping_strategy``
            is not one of ``"first_exhausted"`` / ``"all_exhausted"``.
    """
    # Local imports mirror the lazy-import style of the original module.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first element fixes the expected dataset kind for the rest.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets along ``axis`` (0 = rows, 1 = columns).

    Same element-kind constraints as :func:`interleave_datasets`.

    Raises:
        ValueError: if ``dsets`` is empty, mixes dataset kinds, or contains a
            (possibly empty) dataset dictionary.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
234
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one example's ``content`` and record the char/token ratio.

    Uses the module-level ``tokenizer`` (set below) so the function can be
    dispatched to ``datasets.map`` worker processes.
    """
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
# Drop the raw-text/metadata columns once the token ids have been computed.
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
285
0
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for ChineseCLIPProcessor: save/load round-trips and agreement of the
    processor with its underlying tokenizer and image processor."""

    def setUp(self) -> None:
        # Build a temp dir holding a toy BERT vocab and an image-processor
        # config so the processor can be constructed fully offline.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs) -> BertTokenizer:
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> BertTokenizerFast:
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (the input shape the processor expects)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
205
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length.

    Formula: 3 * sqrt(25 + 10*sqrt(5)) * edge**2.

    :raises ValueError: if ``edge`` is not a positive number.
    """
    # Check the type first: comparing a non-number with 0 would raise TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length.

    Formula: (15 + 7*sqrt(5)) / 4 * edge**3.

    :raises ValueError: if ``edge`` is not a positive number.
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


# Backward-compat alias: the old module exposed both formulas under one name,
# so the surviving (last) binding resolved to the volume function.
__lowerCamelCase = dodecahedron_volume

if __name__ == "__main__":
    import doctest

    doctest.testmod()
285
0
def _a ( UpperCamelCase_ : Tuple ) -> Tuple: """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
340
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class lowercase ( unittest.TestCase ):
    """Tests for ChineseCLIPProcessor: save/load round-trips and agreement of the
    processor with its underlying tokenizer and image processor."""

    def setUp(self):
        # Toy BERT vocab + image-processor config written to a temp dir so
        # everything can be loaded offline.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (the input shape the processor expects)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
285
0
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    """Map a TF Pegasus variable name to the corresponding HF state_dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        # TF stores dense/projection kernels transposed relative to PyTorch.
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    # Biases that have no TF counterpart are zero-initialized.
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Read every non-optimizer variable from a TF checkpoint into a name->ndarray dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    """Convert a TF Pegasus checkpoint + tokenizer and save them to ``save_dir``."""
    # The dataset name (e.g. "aeslc") is the checkpoint's parent directory.
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Positional embeddings are static (sinusoidal) and need not be saved.
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
123
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class lowercase ( ABC ):
    """Abstract base class for CLI subcommands.

    Concrete commands register their argparse subparser and implement the
    command body. NOTE(review): both methods were named ``a`` in the original,
    so the instance method shadows the staticmethod at class-creation time;
    names are kept to preserve the external interface.
    """

    @staticmethod
    @abstractmethod
    def a(snake_case: ArgumentParser):
        """Register this command's subparser on the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def a(self):
        """Execute the command."""
        raise NotImplementedError()
285
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor A__ : int = logging.get_logger(__name__) class lowercase__ ( lowercase_ ): def __init__( self : List[Any] , *snake_case__ : Optional[int] , **snake_case__ : Union[str, Any] ): warnings.warn( "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use DeformableDetrImageProcessor instead." , snake_case__ , ) super().__init__(*snake_case__ , **snake_case__ )
144
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf (parameterized) modules executed during one forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # A "leaf" is a module with no submodules, or a conv/batchnorm layer.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` into ``dest`` by aligning their traced leaf modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert one timm ResNet checkpoint to HF format, verify logits, optionally push."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet (or all of them when ``model_name`` is None)."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
def equation(x: float) -> float:
    """The function whose root is sought: f(x) = 10 - x**2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Approximate a root of ``equation`` inside [a, b] by bisection.

    Halves the bracketing interval until it is narrower than 0.01 and
    returns the last midpoint.

    :raises ValueError: if ``equation(a)`` and ``equation(b)`` do not have
        opposite signs (no sign change means no bracketed root).
    """
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps: keep the half with the sign change.
        if equation(a) * equation(c) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
68
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Time sequential single-example access."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Time sliced batch access over the whole dataset."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Time single-example access under an output format (numpy/pandas/torch/tf)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Time batch access under an output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Run the iteration benchmarks on a generated dataset and dump timings to JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
285
0
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _snake_case : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ): __magic_name__ : List[str] = parent __magic_name__ : Union[str, Any] = 13 __magic_name__ : Tuple = 7 __magic_name__ : List[str] = True __magic_name__ : Optional[int] = True __magic_name__ : List[Any] = True __magic_name__ : str = True __magic_name__ : Dict = 99 __magic_name__ : Optional[int] = 32 __magic_name__ : List[str] = 2 __magic_name__ : Any = 4 __magic_name__ : Tuple = 37 __magic_name__ : List[Any] = "gelu" __magic_name__ : Tuple = 0.1 __magic_name__ : int = 0.1 __magic_name__ : List[str] = 512 __magic_name__ : Union[str, Any] = 16 __magic_name__ : Dict = 2 __magic_name__ : str = 0.02 __magic_name__ : str = 3 __magic_name__ : str = 4 __magic_name__ : str = None def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : int = None if self.use_input_mask: __magic_name__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : Optional[Any] = None if self.use_token_type_ids: 
__magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : Optional[int] = None __magic_name__ : Tuple = None __magic_name__ : List[Any] = None if self.use_labels: __magic_name__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : Any = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Dict = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_a , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ): __magic_name__ : int = TFRoFormerModel(config=_a ) __magic_name__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __magic_name__ : Dict = [input_ids, input_mask] __magic_name__ : int = model(_a ) __magic_name__ : List[Any] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ): __magic_name__ : str = True __magic_name__ : Union[str, Any] = TFRoFormerForCausalLM(config=_a ) __magic_name__ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __magic_name__ : Union[str, Any] = model(_a )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , 
[self.batch_size, self.seq_length, self.vocab_size] ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ): __magic_name__ : Optional[Any] = TFRoFormerForMaskedLM(config=_a ) __magic_name__ : Optional[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __magic_name__ : Union[str, Any] = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ): __magic_name__ : Optional[Any] = self.num_labels __magic_name__ : int = TFRoFormerForSequenceClassification(config=_a ) __magic_name__ : Optional[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __magic_name__ : int = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ): __magic_name__ : Tuple = self.num_choices __magic_name__ : Dict = TFRoFormerForMultipleChoice(config=_a ) __magic_name__ : Optional[int] = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : int = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Any = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : Optional[int] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __magic_name__ : List[Any] = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ): __magic_name__ : int = self.num_labels __magic_name__ : int = TFRoFormerForTokenClassification(config=_a ) __magic_name__ : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __magic_name__ : List[Any] = model(_a ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ): __magic_name__ : Dict = TFRoFormerForQuestionAnswering(config=_a ) __magic_name__ : List[str] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __magic_name__ : int = model(_a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a ): if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Tuple = 
TFRoFormerModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=_a , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*_a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_a ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) self.assertIsNotNone(_a ) @require_tf class _snake_case ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[str] = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) __magic_name__ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : Optional[Any] = model(_a )[0] # TODO Replace vocab size __magic_name__ : List[str] = 50_000 __magic_name__ : List[str] = [1, 6, vocab_size] self.assertEqual(output.shape , _a ) print(output[:, :3, :3] ) # TODO 
Replace values below with what was printed above. __magic_name__ : str = tf.constant( [ [ [-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46], [-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07], [-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-4 ) @require_tf class _snake_case ( unittest.TestCase ): UpperCamelCase__ = 1e-4 def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = tf.constant([[4, 10]] ) __magic_name__ : Optional[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __magic_name__ : Optional[int] = emba(input_ids.shape ) __magic_name__ : Dict = tf.constant( [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] ) tf.debugging.assert_near(_a , _a , atol=self.tolerance ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : List[str] = tf.constant( [ [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00], [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17], [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70], ] ) __magic_name__ : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) __magic_name__ : str = emba.weight[:3, :5] tf.debugging.assert_near(_a , _a , atol=self.tolerance ) @require_tf class _snake_case ( unittest.TestCase ): UpperCamelCase__ = 1e-4 def SCREAMING_SNAKE_CASE ( self ): # 2,12,16,64 __magic_name__ : int = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __magic_name__ : Optional[Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __magic_name__ : List[str] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) __magic_name__ : Tuple = embed_positions([2, 16, 768] )[None, None, :, :] __magic_name__ , __magic_name__ : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings( _a , _a , _a ) __magic_name__ : List[Any] = tf.constant( [ [0.00_00, 0.01_00, 
0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00], [-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43], [-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85], [-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71], [0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80], [3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53], ] ) __magic_name__ : int = tf.constant( [ [0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00], [0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43], [1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85], [2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71], [-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80], [-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , _a , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , _a , atol=self.tolerance )
281
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case_ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: snake_case_ = max( mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , ) snake_case_ = val return f[i][j] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case_ = dp[i - 1][w_] return dp[n][w_], dp def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case_ = len(UpperCamelCase__ ) if num_items != len(UpperCamelCase__ ): snake_case_ = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(UpperCamelCase__ )} values''' ) raise ValueError(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): if not isinstance(wt[i] , UpperCamelCase__ ): snake_case_ = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase__ ) snake_case_ , snake_case_ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = set() _construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return optimal_val, example_optional_set def 
__lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: optimal_set.add(UpperCamelCase__ ) _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ ) if __name__ == "__main__": _UpperCAmelCase : Tuple = [3, 2, 4, 4] _UpperCAmelCase : Optional[Any] = [4, 3, 2, 3] _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : str = 6 _UpperCAmelCase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _UpperCAmelCase , _UpperCAmelCase : List[Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _UpperCAmelCase , _UpperCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem UpperCAmelCase = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 UpperCAmelCase = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase ( a__ : Dict ) -> List[Any]: if "://" in dataset_path: _UpperCamelCase = dataset_path.split('''://''' )[1] return dataset_path def lowercase ( a__ : int ) -> Any: if fs is not None and fs.protocol != "file": return True else: return False def lowercase ( a__ : Optional[int] , a__ : Tuple , a__ : int ) -> Union[str, Any]: _UpperCamelCase = not is_remote_filesystem(UpperCamelCase__ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(UpperCamelCase__ ) , fs._strip_protocol(UpperCamelCase__ ) ) else: fs.mv(UpperCamelCase__ , UpperCamelCase__ , recursive=UpperCamelCase__ ) def lowercase ( ) -> Optional[Any]: if hasattr(fsspec.asyn , '''reset_lock''' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = threading.Lock()
256
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = 
spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a 
corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. 
snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
0
'''simple docstring''' import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def snake_case_ ( SCREAMING_SNAKE_CASE__ = 3 ): """simple docstring""" if isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise TypeError("""number of qubits must be a integer.""" ) if number_of_qubits <= 0: raise ValueError("""number of qubits must be > 0.""" ) if math.floor(UpperCamelCase__ ) != number_of_qubits: raise ValueError("""number of qubits must be exact integer.""" ) if number_of_qubits > 10: raise ValueError("""number of qubits too large to simulate(>10).""" ) _SCREAMING_SNAKE_CASE : Dict = QuantumRegister(UpperCamelCase__ , """qr""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = ClassicalRegister(UpperCamelCase__ , """cr""" ) _SCREAMING_SNAKE_CASE : Any = QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = number_of_qubits for i in range(UpperCamelCase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(UpperCamelCase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , UpperCamelCase__ , UpperCamelCase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(UpperCamelCase__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(UpperCamelCase__ , UpperCamelCase__ ) # simulate with 10000 shots _SCREAMING_SNAKE_CASE : List[Any] = Aer.get_backend("""qasm_simulator""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = execute(UpperCamelCase__ , UpperCamelCase__ , shots=1_0000 ) return job.result().get_counts(UpperCamelCase__ ) if __name__ == "__main__": print( F"Total count for quantum fourier transform state is: \\n {quantum_fourier_transform(3)}" )
200
import os import numpy import onnx def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = os.path.dirname(UpperCamelCase__ ) snake_case_ = os.path.basename(UpperCamelCase__ ) snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() 
snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCamelCase__ ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
285
0
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , A , A=3 , A=32 , A=3 , A=10 , A=[10, 20, 30, 40] , A=[1, 1, 2, 1] , A=True , A=True , A="relu" , A=3 , A=None , ) -> Dict: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : Union[str, Any] = batch_size UpperCAmelCase : Dict = image_size UpperCAmelCase : int = num_channels UpperCAmelCase : Any = embeddings_size UpperCAmelCase : Optional[Any] = hidden_sizes UpperCAmelCase : Optional[int] = depths UpperCAmelCase : List[Any] = is_training UpperCAmelCase : Optional[Any] = use_labels UpperCAmelCase : str = hidden_act UpperCAmelCase : List[str] = num_labels UpperCAmelCase : Union[str, Any] = scope UpperCAmelCase : Tuple = len(A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : int = self.get_config() return config, pixel_values def _lowercase( self ) -> Optional[int]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _lowercase( self , A , A ) -> List[Any]: UpperCAmelCase : List[Any] = FlaxRegNetModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A ) # Output shape (b, c, 
h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowercase( self , A , A ) -> Any: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : str = FlaxRegNetForImageClassification(config=A ) UpperCAmelCase : Dict = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class UpperCamelCase_ ( lowercase_ , unittest.TestCase ): lowercase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Optional[int]: UpperCAmelCase : str = FlaxRegNetModelTester(self ) UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=A , has_text_modality=A ) def _lowercase( self ) -> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase( self ) -> int: return def _lowercase( self ) -> Any: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def _lowercase( self ) -> Optional[Any]: pass 
@unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Dict: UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = model_class(A ) UpperCAmelCase : Tuple = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : int = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A ) def _lowercase( self ) -> List[str]: def check_hidden_states_output(A , A , A ): UpperCAmelCase : List[str] = model_class(A ) UpperCAmelCase : str = model(**self._prepare_for_class(A , A ) ) UpperCAmelCase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase : Tuple = self.model_tester.num_stages self.assertEqual(len(A ) , expected_num_stages + 1 ) UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = True check_hidden_states_output(A , A , A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Tuple = True check_hidden_states_output(A , A , A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase : int = self._prepare_for_class(A , A ) UpperCAmelCase : int = model_class(A ) @jax.jit def model_jitted(A , **A ): return model(pixel_values=A , **A ) with self.subTest("""JIT Enabled""" ): UpperCAmelCase : List[Any] = model_jitted(**A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCAmelCase : 
List[str] = model_jitted(**A ).to_tuple() self.assertEqual(len(A ) , len(A ) ) for jitted_output, output in zip(A , A ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCamelCase ( ) -> Any: UpperCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class UpperCamelCase_ ( unittest.TestCase ): @cached_property def _lowercase( self ) -> List[str]: return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) UpperCAmelCase : Any = self.default_image_processor UpperCAmelCase : List[str] = prepare_img() UpperCAmelCase : str = image_processor(images=A , return_tensors="""np""" ) UpperCAmelCase : List[str] = model(**A ) # verify the logits UpperCAmelCase : Union[str, Any] = (1, 1000) self.assertEqual(outputs.logits.shape , A ) UpperCAmelCase : int = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
265
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
0
from collections.abc import Callable import numpy as np def lowercase_ ( _A : str , _A : Optional[Any] , _A : Any , _A : Optional[int] , _A : Optional[Any] ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) ) lowerCamelCase__ : Union[str, Any] = np.zeros((n + 1,) ) lowerCamelCase__ : List[str] = ya lowerCamelCase__ : List[str] = xa for k in range(UpperCamelCase__ ): lowerCamelCase__ : Any = y[k] + step_size * ode_func(UpperCamelCase__ , y[k] ) lowerCamelCase__ : List[Any] = y[k] + ( (step_size / 2) * (ode_func(UpperCamelCase__ , y[k] ) + ode_func(x + step_size , UpperCamelCase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
184
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): with open(UpperCamelCase__ ) as metadata_file: _UpperCAmelCase : str = json.load(UpperCamelCase__ ) _UpperCAmelCase : int = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata["model_config"] ) # Load in the weights from the checkpoint_path _UpperCAmelCase : Optional[int] = torch.load(UpperCamelCase__ , map_location="cpu" )["module"] # Load the entity vocab file _UpperCAmelCase : Optional[int] = load_original_entity_vocab(UpperCamelCase__ ) # add an entry for [MASK2] _UpperCAmelCase : Optional[Any] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCAmelCase : Tuple = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase : str = AddedToken("<ent>" , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) _UpperCAmelCase : Tuple = AddedToken("<ent2>" , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , "r" ) as f: _UpperCAmelCase : List[Any] = json.load(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = "MLukeTokenizer" with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , "w" ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , 
"w" ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : str = MLukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens _UpperCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0] _UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(["#"] )[0] _UpperCAmelCase : Optional[Any] = state_dict["embeddings.word_embeddings.weight"] _UpperCAmelCase : int = word_emb[ent_init_index].unsqueeze(0 ) _UpperCAmelCase : Optional[Any] = word_emb[enta_init_index].unsqueeze(0 ) _UpperCAmelCase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCAmelCase : List[Any] = state_dict[bias_name] _UpperCAmelCase : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCAmelCase : str = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCAmelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase : Optional[Any] = F"""encoder.layer.{layer_index}.attention.self.""" _UpperCAmelCase : Union[str, Any] = state_dict[prefix + matrix_name] _UpperCAmelCase : str = state_dict[prefix + matrix_name] _UpperCAmelCase : List[Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase : str = state_dict["entity_embeddings.entity_embeddings.weight"] _UpperCAmelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) _UpperCAmelCase : List[str] = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCAmelCase : int = state_dict["entity_predictions.bias"] _UpperCAmelCase : str = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) 
_UpperCAmelCase : Any = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCAmelCase : Union[str, Any] = LukeForMaskedLM(config=UpperCamelCase__ ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) _UpperCAmelCase : Any = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): _UpperCAmelCase : Dict = state_dict[key] else: _UpperCAmelCase : Optional[Any] = state_dict[key] _UpperCAmelCase , _UpperCAmelCase : List[str] = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(UpperCamelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCAmelCase : Any = MLukeTokenizer.from_pretrained(UpperCamelCase__ , task="entity_classification" ) _UpperCAmelCase : Union[str, Any] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." 
_UpperCAmelCase : Union[str, Any] = (0, 9) _UpperCAmelCase : int = tokenizer(UpperCamelCase__ , entity_spans=[span] , return_tensors="pt" ) _UpperCAmelCase : List[Any] = model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase : Dict = torch.Size((1, 33, 768) ) _UpperCAmelCase : Dict = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase : int = torch.Size((1, 1, 768) ) _UpperCAmelCase : Optional[int] = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" F""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction _UpperCAmelCase : Tuple = MLukeTokenizer.from_pretrained(UpperCamelCase__ ) _UpperCAmelCase : Tuple = "Tokyo is the capital of <mask>." 
_UpperCAmelCase : Union[str, Any] = (24, 30) _UpperCAmelCase : Union[str, Any] = tokenizer(UpperCamelCase__ , entity_spans=[span] , return_tensors="pt" ) _UpperCAmelCase : Any = model(**UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = encoding["input_ids"][0].tolist() _UpperCAmelCase : List[str] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) _UpperCAmelCase : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCamelCase__ ) _UpperCAmelCase : Any = outputs.entity_logits[0][0].argmax().item() _UpperCAmelCase : Dict = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : Any = ["[MASK]", "[PAD]", "[UNK]"] _UpperCAmelCase : Tuple = [json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )] _UpperCAmelCase : Tuple = {} for entry in data: _UpperCAmelCase : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCAmelCase : int = entity_id break _UpperCAmelCase : Union[str, Any] = F"""{language}:{entity_name}""" _UpperCAmelCase : Optional[int] = entity_id return new_mapping if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' 
) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) lowerCamelCase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
234
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
def a ( A__ : int ) -> List[Any]: """simple docstring""" if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise TypeError('only integers accepted as input' ) else: _lowercase =str(abs(UpperCamelCase__ ) ) _lowercase =[list(UpperCamelCase__ ) for char in range(len(UpperCamelCase__ ) )] for index in range(len(UpperCamelCase__ ) ): num_transpositions[index].pop(UpperCamelCase__ ) return max( int(''.join(list(UpperCamelCase__ ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
205
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 
0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
import random def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ) -> List[Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = [], [], [] for element in data: if element < pivot: less.append(UpperCamelCase__ ) elif element > pivot: greater.append(UpperCamelCase__ ) else: equal.append(UpperCamelCase__ ) return less, equal, greater def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] ) -> List[str]: """simple docstring""" if index >= len(UpperCamelCase__ ) or index < 0: return None lowerCAmelCase__ = items[random.randint(0 , len(UpperCamelCase__ ) - 1 )] lowerCAmelCase__ = 0 lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _partition(UpperCamelCase__ , UpperCamelCase__ ) lowerCAmelCase__ = len(UpperCamelCase__ ) lowerCAmelCase__ = len(UpperCamelCase__ ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(UpperCamelCase__ , UpperCamelCase__ ) # must be in larger else: return quick_select(UpperCamelCase__ , index - (m + count) )
340
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , 
splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 
'README.md' ) )
285
0
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _snake_case : Optional[int] = """scheduler_config.json""" class a (lowercase_ ): """simple docstring""" __UpperCAmelCase : List[str] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : Optional[Any] = 4 __UpperCAmelCase : Optional[int] = 5 @dataclass class a (lowercase_ ): """simple docstring""" __UpperCAmelCase : jnp.ndarray class a : """simple docstring""" __UpperCAmelCase : Dict = SCHEDULER_CONFIG_NAME __UpperCAmelCase : Tuple = ['''dtype'''] __UpperCAmelCase : Any = [] __UpperCAmelCase : int = True @classmethod def __snake_case ( cls : Union[str, Any] , lowerCamelCase : Optional[Any] = None , lowerCamelCase : Tuple = None , lowerCamelCase : Optional[Any]=False , **lowerCamelCase : Optional[int] , ) -> Dict: __snake_case , __snake_case : Union[str, Any] = cls.load_config( pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , ) __snake_case , __snake_case : List[Any] = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase ) if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ): __snake_case : Any = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __snake_case ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : int = False , **lowerCamelCase : Tuple ) -> str: self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase ) @property def __snake_case ( self : Optional[Any] ) -> int: return self._get_compatibles() @classmethod def __snake_case ( cls : int ) -> Tuple: __snake_case : Tuple = list(set([cls.__name__] + cls._compatibles ) ) __snake_case : int = 
importlib.import_module(__name__.split("." )[0] ) __snake_case : List[str] = [ getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase ) ] return compatible_classes def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): assert len(UpperCamelCase__ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(UpperCamelCase__ ) - x.ndim) ) , UpperCamelCase__ ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=0.9_9_9 , __lowerCamelCase=jnp.floataa ): def alpha_bar(__lowerCamelCase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 __snake_case : List[str] = [] for i in range(UpperCamelCase__ ): __snake_case : Tuple = i / num_diffusion_timesteps __snake_case : int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(UpperCamelCase__ ) / alpha_bar(UpperCamelCase__ ) , UpperCamelCase__ ) ) return jnp.array(UpperCamelCase__ , dtype=UpperCamelCase__ ) @flax.struct.dataclass class a : """simple docstring""" __UpperCAmelCase : jnp.ndarray __UpperCAmelCase : jnp.ndarray __UpperCAmelCase : jnp.ndarray @classmethod def __snake_case ( cls : int , lowerCamelCase : Optional[int] ) -> Union[str, Any]: __snake_case : Tuple = scheduler.config if config.trained_betas is not None: __snake_case : Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": __snake_case : Optional[int] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
__snake_case : Optional[Any] = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __snake_case : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' ) __snake_case : str = 1.0 - betas __snake_case : List[str] = jnp.cumprod(lowerCamelCase , axis=0 ) return cls( alphas=lowerCamelCase , betas=lowerCamelCase , alphas_cumprod=lowerCamelCase , ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case : List[Any] = state.alphas_cumprod __snake_case : Optional[Any] = alphas_cumprod[timesteps] ** 0.5 __snake_case : Optional[int] = sqrt_alpha_prod.flatten() __snake_case : List[str] = broadcast_to_shape_from_left(UpperCamelCase__ , original_samples.shape ) __snake_case : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5 __snake_case : Optional[int] = sqrt_one_minus_alpha_prod.flatten() __snake_case : Union[str, Any] = broadcast_to_shape_from_left(UpperCamelCase__ , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case , __snake_case : Any = get_sqrt_alpha_prod(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __snake_case : Tuple = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case , __snake_case : str = get_sqrt_alpha_prod(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __snake_case : Optional[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
123
import unittest

from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowercase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for BLOOM's fast tokenizer.

    BLOOM ships only a Rust-backed (fast) tokenizer, so the slow tokenizer is
    disabled and every check runs against ``BloomTokenizerFast``.

    NOTE(review): the mixin base class plus the attribute and method names
    below were reconstructed from references inside this class
    (``self.special_tokens_map``, ``self.rust_tokenizer_class``,
    ``super().setUp()``, ``self.get_rust_tokenizer()``); in the original text
    they had all collapsed to one repeated identifier, so each later
    definition shadowed the previous one and unittest discovered no tests.
    """

    slow_tokenizer_class = None  # BLOOM has no slow tokenizer implementation
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        """Download the reference BLOOM tokenizer once and cache it under tmpdirname."""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        """Reload the cached tokenizer, forcing BLOOM's special-token map."""
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Encode a small batch to the known ids, then decode back to the input."""
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        target_ids = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        computed_ids = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(computed_ids, target_ids)
        decoded = tokenizer.batch_decode(computed_ids)
        self.assertListEqual(decoded, input_sentences)

    def test_padding(self, max_length=6):
        """Without a pad token, padding='max_length' must raise; plain encoding must not."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                simple_input = "This is a simple input"
                simple_batch = ["This is a simple input 1", "This is a simple input 2"]
                simple_pair = ("This is a simple input", "This is a pair")
                pair_batch = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests: encoding without padding should always work.
                try:
                    tokenizer_r.encode(simple_input, max_length=max_length)
                    tokenizer_r.encode_plus(simple_input, max_length=max_length)
                    tokenizer_r.batch_encode_plus(simple_batch, max_length=max_length)
                    tokenizer_r.encode(simple_pair, max_length=max_length)
                    tokenizer_r.batch_encode_plus(pair_batch, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode, simple_input, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, simple_input, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, simple_batch, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, simple_pair, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, simple_pair, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, pair_batch, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """Round-trip a multilingual XNLI sample through encode/decode."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
285
0
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef A__ : Dict = ( """This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" ) def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] ) -> Tuple: warnings.warn(UpperCamelCase__ , UpperCamelCase__ ) requires_backends(UpperCamelCase__ , "sklearn" ) return (preds == labels).mean() def _snake_case ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str ) -> List[Any]: warnings.warn(UpperCamelCase__ , UpperCamelCase__ ) requires_backends(UpperCamelCase__ , "sklearn" ) lowerCamelCase_ : List[Any] =simple_accuracy(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ : str =fa_score(y_true=UpperCamelCase__ , y_pred=UpperCamelCase__ ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int ) -> Any: warnings.warn(UpperCamelCase__ , UpperCamelCase__ ) requires_backends(UpperCamelCase__ , "sklearn" ) lowerCamelCase_ : str =pearsonr(UpperCamelCase__ , UpperCamelCase__ )[0] lowerCamelCase_ : Optional[Any] =spearmanr(UpperCamelCase__ , UpperCamelCase__ )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def _snake_case ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any ) -> List[Any]: warnings.warn(UpperCamelCase__ , UpperCamelCase__ ) requires_backends(UpperCamelCase__ , "sklearn" ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), F"""Predictions and labels have mismatched lengths {len(UpperCamelCase__ )} and 
{len(UpperCamelCase__ )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(UpperCamelCase__ , UpperCamelCase__ )} elif task_name == "sst-2": return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} elif task_name == "mrpc": return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ ) elif task_name == "sts-b": return pearson_and_spearman(UpperCamelCase__ , UpperCamelCase__ ) elif task_name == "qqp": return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} elif task_name == "qnli": return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} elif task_name == "rte": return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} elif task_name == "wnli": return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} elif task_name == "hans": return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} else: raise KeyError(UpperCamelCase__ ) def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) -> Optional[Any]: warnings.warn(UpperCamelCase__ , UpperCamelCase__ ) requires_backends(UpperCamelCase__ , "sklearn" ) if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError(F"""Predictions and labels have mismatched lengths {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} else: raise KeyError(UpperCamelCase__ )
144
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ``num_class_images`` LAION images matching ``class_prompt``.

    Writes the images under ``{class_data_dir}/images`` and logs captions,
    URLs and file paths to sidecar text files. Skips work entirely when
    enough images are already present.

    NOTE(review): names ``retrieve``/``parse_args`` restored from the
    ``__main__`` block, which calls them; the original text had both
    functions collapsed to the same identifier.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size geometrically until enough candidates are returned
    # (capped at 10k results).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f_caption, open(f"{class_data_dir}/urls.txt", "w") as f_url, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f_path:
        while total < num_class_images:
            if count >= len(class_images):
                # Fix: candidates exhausted before the quota was reached;
                # previously this indexed past the end of the list.
                break
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Decoding validates that the payload is a real image.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f_caption.write(images["caption"] + "\n")
                    f_url.write(images["url"] + "\n")
                    f_path.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Deliberate best-effort download: skip any entry whose fetch
                # or decode fails and move on to the next candidate.
                continue
    return


def parse_args():
    """CLI: prompt, output directory, and number of images to download."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
import qiskit


def quantum_entanglement(qubits: int = 2) -> "qiskit.result.counts.Counts":
    """Build a GHZ (fully entangled) state on ``qubits`` qubits and measure it.

    NOTE(review): the function name is restored from the ``__main__`` block
    below, which calls ``quantum_entanglement``; the previous annotations
    (``List[Any]`` parameter, ``-> str`` return) did not match the actual
    int parameter and counts-dict result.

    Args:
        qubits: number of qubits (and classical bits) in the circuit.

    Returns:
        Measurement counts over 1000 shots; for a GHZ state only the
        all-zeros and all-ones bitstrings should appear.
    """
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate: entangle qubit i with its neighbour
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
68
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. 
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class lowercase(PreTrainedTokenizer):
    """CANINE tokenizer: tokenization is character-level, so a token is a single
    Unicode codepoint and an id is simply ``ord(char)``; special tokens live in
    the Private Use Area.

    NOTE(review): the base class, special-token defaults, and method names were
    reconstructed. In the original text every method had collapsed to the same
    identifier (later defs shadowed earlier ones), the defaults referenced an
    undefined name (a NameError at class-creation time), and the base class
    name was unresolved even though ``PreTrainedTokenizer`` is imported above.
    Reconstruction follows the PreTrainedTokenizer hook API each body
    implements — confirm against the upstream CANINE tokenizer.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(0xE000),  # [CLS] codepoint
        eos_token=chr(0xE001),  # [SEP] codepoint
        sep_token=chr(0xE001),  # [SEP]
        cls_token=chr(0xE000),  # [CLS]
        pad_token=chr(0),  # [PAD]
        mask_token=chr(0xE003),  # [MASK]
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        # Every character is its own token.
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        # A token's id is its Unicode codepoint.
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        # Special ids render as their bracketed names, everything else as chr().
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # [CLS] A [SEP]  (and for pairs: [CLS] A [SEP] B [SEP])
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Segment ids: 0 for the first sequence (incl. [CLS]/[SEP]), 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # Character-level model: there is no vocabulary file to write.
        return ()
285
0
import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


# NOTE(review): throughout this script the original identifiers appear to have
# been mangled: every function is named `lowerCAmelCase_` (so each definition
# shadows the previous one), every assignment target is `__magic_name__`, and
# names such as `name`, `key`, `val`, `key_split`, `dim`, `model_name`,
# `missing_keys`, `parser` are read without ever being bound. The code is kept
# byte-identical here; restoring it would require recovering the lost
# assignment targets (including the state-dict key strings) from the upstream
# script — TODO confirm against the original GroupViT conversion script.
def lowerCAmelCase_ ( _snake_case : Tuple ) -> Union[str, Any]:
    """Map one GroupViT checkpoint parameter name to its HF GroupViTModel name.

    NOTE(review): the body reads `name`, which is never bound (the parameter is
    `_snake_case`), and every `.replace(...)` result is assigned to the same
    throwaway `__magic_name__` — presumably these were once `name = ...`
    rebindings feeding the final `return name`.
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        __magic_name__ : Optional[Any] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        __magic_name__ : Union[str, Any] = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        __magic_name__ : str = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        __magic_name__ : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        __magic_name__ : List[Any] = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        __magic_name__ : int = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        __magic_name__ : int = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        __magic_name__ : List[Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        __magic_name__ : str = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        __magic_name__ : List[str] = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        __magic_name__ : Any = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        __magic_name__ : str = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        __magic_name__ : Optional[int] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        __magic_name__ : List[str] = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        __magic_name__ : Dict = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        __magic_name__ : Any = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        __magic_name__ : Dict = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        __magic_name__ : Union[str, Any] = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        __magic_name__ : Union[str, Any] = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        __magic_name__ : Optional[int] = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        __magic_name__ : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        __magic_name__ : str = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        __magic_name__ : Any = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        __magic_name__ : Any = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name


def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Any ) -> Optional[Any]:
    """Rewrite a GroupViT state dict in place into HF naming, splitting fused
    qkv / in_proj weights.

    NOTE(review): the split q/k/v slices are assigned to the throwaway
    `__magic_name__` instead of back into `orig_state_dict[...]` — the
    destination key strings look lost to the mangling; verify against the
    upstream conversion script before running this.
    """
    for key in orig_state_dict.copy().keys():
        __magic_name__ : Tuple = orig_state_dict.pop(UpperCamelCase__ )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            __magic_name__ : Tuple = key.split("." )
            __magic_name__ , __magic_name__ : List[str] = int(key_split[2] ), int(key_split[4] )
            __magic_name__ : Optional[int] = config.vision_config.hidden_size
            if "weight" in key:
                __magic_name__ : Optional[int] = val[:dim, :]
                __magic_name__ : List[Any] = val[dim : dim * 2, :]
                __magic_name__ : Optional[int] = val[-dim:, :]
            else:
                __magic_name__ : Dict = val[:dim]
                __magic_name__ : Optional[int] = val[dim : dim * 2]
                __magic_name__ : List[str] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            __magic_name__ : int = key.split("." )
            __magic_name__ : List[str] = int(key_split[3] )
            __magic_name__ : Optional[Any] = config.text_config.hidden_size
            if "weight" in key:
                __magic_name__ : Dict = val[:dim, :]
                __magic_name__ : Tuple = val[ dim : dim * 2, : ]
                __magic_name__ : List[Any] = val[-dim:, :]
            else:
                __magic_name__ : Union[str, Any] = val[:dim]
                __magic_name__ : Optional[int] = val[dim : dim * 2]
                __magic_name__ : Dict = val[-dim:]
        else:
            __magic_name__ : Optional[Any] = rename_key(UpperCamelCase__ )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                __magic_name__ : Optional[int] = val.squeeze_()
            else:
                __magic_name__ : Dict = val

    return orig_state_dict


def lowerCAmelCase_ ( ) -> Optional[Any]:
    """Fetch the standard COCO cats test image used to sanity-check the model."""
    __magic_name__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
    __magic_name__ : str = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
    return im


@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : str="groupvit-gcc-yfcc" , _snake_case : str=False ) -> str:
    """Convert a GroupViT checkpoint to HF format, verify its logits on a test
    image, save processor + model, and optionally push to the hub.

    NOTE(review): only "groupvit-gcc-yfcc" and "groupvit-gcc-redcaps" are
    accepted — the CLI default "groupvit-gccy-fcc" below would always hit the
    ValueError branch (looks like a typo for "groupvit-gcc-yfcc").
    """
    __magic_name__ : Optional[int] = GroupViTConfig()
    __magic_name__ : Tuple = GroupViTModel(UpperCamelCase__ ).eval()

    __magic_name__ : str = torch.load(UpperCamelCase__ , map_location="cpu" )["model"]
    __magic_name__ : int = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
    __magic_name__ , __magic_name__ : Optional[int] = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase__ ) == 0)

    # verify result
    __magic_name__ : int = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    __magic_name__ : List[Any] = prepare_img()
    __magic_name__ : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="pt" )

    with torch.no_grad():
        __magic_name__ : Optional[int] = model(**UpperCamelCase__ )

    if model_name == "groupvit-gcc-yfcc":
        __magic_name__ : str = torch.tensor([[13.3_523, 6.3_629]] )
    elif model_name == "groupvit-gcc-redcaps":
        __magic_name__ : List[Any] = torch.tensor([[16.1_873, 8.6_230]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , UpperCamelCase__ , atol=1E-3 )

    processor.save_pretrained(UpperCamelCase__ )
    model.save_pretrained(UpperCamelCase__ )
    print("Successfully saved processor and model to" , UpperCamelCase__ )

    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(UpperCamelCase__ , organization="nielsr" )
        model.push_to_hub(UpperCamelCase__ , organization="nielsr" )


if __name__ == "__main__":
    # NOTE(review): the parser is bound to `snake_case` but used as `parser`
    # below — another casualty of the identifier mangling.
    snake_case : str = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gccy-fcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    snake_case : str = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
281
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x2000 benchmark grid sorted in decreasing order along both axes."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


# NOTE(review): function names restored from the references inside this module
# (the module-level `generate_large_matrix()` call and the timeit setup string
# below name them); the original text had every function collapsed to the same
# identifier.
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Sanity-check that every row and every column is sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search a decreasing array for the index of its first negative value.

    Returns len(array) when there are no negatives, and 0 for an empty array
    or one that starts negative.
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # num must be negative and the previous element non-negative for mid
        # to be the boundary.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers, so return the length (one past the last index).
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using binary search; the bound shrinks row by row
    because columns are also sorted decreasing."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by flattening the whole grid (O(rows * cols))."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Brute force, but stop scanning a row at its first negative (rows are
    sorted decreasing, so the rest of the row is negative too)."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Time the three counting strategies on the large benchmark grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
285
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCAmelCase_ ( lowercase_): snake_case__ = '''vivit''' def __init__( self : Optional[int] , __UpperCamelCase : Tuple=224 , __UpperCamelCase : str=32 , __UpperCamelCase : Union[str, Any]=[2, 16, 16] , __UpperCamelCase : str=3 , __UpperCamelCase : Any=768 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : str=3072 , __UpperCamelCase : Any="gelu_fast" , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : Optional[Any]=1E-06 , __UpperCamelCase : Union[str, Any]=True , **__UpperCamelCase : str , ) -> List[Any]: _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = image_size _UpperCamelCase = num_frames _UpperCamelCase = tubelet_size _UpperCamelCase = num_channels _UpperCamelCase = qkv_bias super().__init__(**__UpperCamelCase )
256
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case 
, snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) 
snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0
'''simple docstring''' from __future__ import annotations from math import pow, sqrt def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance == 0: return {"resistance": sqrt(pow(UpperCamelCase__ , 2 ) - pow(UpperCamelCase__ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(UpperCamelCase__ , 2 ) - pow(UpperCamelCase__ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(UpperCamelCase__ , 2 ) + pow(UpperCamelCase__ , 2 ) )} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
200
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
'''simple docstring''' import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a : Dict = logging.get_logger(__name__) class UpperCamelCase_ : def __init__( self , A = None , A = None , A=None , A=None ) -> Union[str, Any]: if not conversation_id: UpperCAmelCase : Optional[Any] = uuid.uuida() if past_user_inputs is None: UpperCAmelCase : List[Any] = [] if generated_responses is None: UpperCAmelCase : str = [] UpperCAmelCase : Any = conversation_id UpperCAmelCase : List[str] = past_user_inputs UpperCAmelCase : str = generated_responses UpperCAmelCase : Any = text def __eq__( self , A ) -> str: if not isinstance(A , A ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def _lowercase( self , A , A = False ) -> Optional[int]: if self.new_user_input: if overwrite: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' f'''with: "{text}".''' ) UpperCAmelCase : Union[str, Any] = text else: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' f'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: UpperCAmelCase : List[Any] = text def _lowercase( self ) -> List[str]: if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) UpperCAmelCase : str = None def _lowercase( self , A ) -> Optional[int]: self.generated_responses.append(A ) def _lowercase( self ) -> Any: for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = f'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): UpperCAmelCase : str = """user""" if is_user else """bot""" output += f'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class UpperCamelCase_ ( lowercase_ ): def __init__( self , *A , **A ) -> Union[str, Any]: super().__init__(*A , **A ) if self.tokenizer.pad_token_id is None: UpperCAmelCase : str = self.tokenizer.eos_token def _lowercase( self , A=None , A=None , A=None , **A ) -> Optional[Any]: UpperCAmelCase : Tuple = {} UpperCAmelCase : Tuple = {} UpperCAmelCase : Any = {} if min_length_for_response is not None: UpperCAmelCase : Tuple = min_length_for_response if minimum_tokens is not None: UpperCAmelCase : List[Any] = minimum_tokens if "max_length" in generate_kwargs: UpperCAmelCase : Any = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: UpperCAmelCase : Optional[Any] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(A ) return preprocess_params, forward_params, 
postprocess_params def __call__( self , A , A=0 , **A ) -> str: UpperCAmelCase : str = super().__call__(A , num_workers=A , **A ) if isinstance(A , A ) and len(A ) == 1: return outputs[0] return outputs def _lowercase( self , A , A=32 ) -> List[Any]: if not isinstance(A , A ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' """Add user inputs with the conversation\'s `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): UpperCAmelCase : str = self.tokenizer._build_conversation_input_ids(A ) else: # If the tokenizer cannot handle conversations, we default to only the old version UpperCAmelCase : List[Any] = self._legacy_parse_and_tokenize(A ) if self.framework == "pt": UpperCAmelCase : str = torch.LongTensor([input_ids] ) elif self.framework == "tf": UpperCAmelCase : Optional[int] = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def _lowercase( self , A , A=10 , **A ) -> Dict: UpperCAmelCase : Union[str, Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length ) UpperCAmelCase : List[Any] = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(f'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) UpperCAmelCase : int = max_length - minimum_tokens UpperCAmelCase : Dict = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: UpperCAmelCase : Optional[Any] = model_inputs["""attention_mask"""][:, -trim:] UpperCAmelCase : Optional[Any] = model_inputs.pop("""conversation""" ) UpperCAmelCase : Any = max_length UpperCAmelCase : Optional[Any] = self.model.generate(**A , **A ) if self.model.config.is_encoder_decoder: UpperCAmelCase : List[str] = 1 else: UpperCAmelCase : List[str] = n 
return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def _lowercase( self , A , A=True ) -> Dict: UpperCAmelCase : Tuple = model_outputs["""output_ids"""] UpperCAmelCase : Optional[Any] = self.tokenizer.decode( output_ids[0] , skip_special_tokens=A , clean_up_tokenization_spaces=A , ) UpperCAmelCase : str = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(A ) return conversation def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token_id UpperCAmelCase : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(A , add_special_tokens=A ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(A , add_special_tokens=A ) ) if len(A ) > self.tokenizer.model_max_length: UpperCAmelCase : Optional[int] = input_ids[-self.tokenizer.model_max_length :] return input_ids
265
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) 
snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , 
snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if 
output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output 
return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
0
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class _lowercase ( unittest.TestCase): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Dict = True , __lowerCamelCase : str = None , __lowerCamelCase : List[str] = 32 , __lowerCamelCase : Any = True , __lowerCamelCase : int = 1 / 255 , __lowerCamelCase : Optional[int] = True , __lowerCamelCase : Tuple = True , __lowerCamelCase : Dict = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCamelCase : List[str] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCamelCase : Union[str, Any] = True , __lowerCamelCase : Tuple=7 , __lowerCamelCase : Optional[Any]=30 , __lowerCamelCase : str=400 , __lowerCamelCase : List[Any]=3 , ): '''simple docstring''' lowerCamelCase__ : str = parent lowerCamelCase__ : List[str] = do_resize lowerCamelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 288} lowerCamelCase__ : str = size_divisor lowerCamelCase__ : List[str] = do_rescale lowerCamelCase__ : str = rescale_factor lowerCamelCase__ : List[Any] = do_normalize lowerCamelCase__ : List[Any] = do_center_crop lowerCamelCase__ : str = image_mean lowerCamelCase__ : Union[str, Any] = image_std lowerCamelCase__ : int = do_pad lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = min_resolution lowerCamelCase__ : int = max_resolution def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, 
"do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str=False ): '''simple docstring''' if not batched: lowerCamelCase__ : Optional[Any] = self.size["shortest_edge"] lowerCamelCase__ : int = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): lowerCamelCase__ , lowerCamelCase__ : Any = image.size else: lowerCamelCase__ , lowerCamelCase__ : str = image.shape[1], image.shape[2] lowerCamelCase__ : Optional[int] = size / min(__lowerCamelCase , __lowerCamelCase ) if h < w: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = size, scale * w else: lowerCamelCase__ , lowerCamelCase__ : Tuple = scale * h, size lowerCamelCase__ : Optional[int] = int((1333 / 800) * size ) if max(__lowerCamelCase , __lowerCamelCase ) > max_size: lowerCamelCase__ : Dict = max_size / max(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : str = newh * scale lowerCamelCase__ : Any = neww * scale lowerCamelCase__ , lowerCamelCase__ : str = int(newh + 0.5 ), int(neww + 0.5 ) lowerCamelCase__ , lowerCamelCase__ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCamelCase__ : List[Any] = [] for image in image_inputs: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase__ : int = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] lowerCamelCase__ : str = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowercase ( lowercase_ , unittest.TestCase): """simple docstring""" A__ = BridgeTowerImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : str = 
BridgeTowerImageProcessingTester(self ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : int = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ 
: Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Dict = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , 
return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
184
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_xlm_roberta""": [ """XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaConfig""", """XLMRobertaOnnxConfig""", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""XLMRobertaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""XLMRobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaForCausalLM""", """XLMRobertaForMaskedLM""", """XLMRobertaForMultipleChoice""", """XLMRobertaForQuestionAnswering""", """XLMRobertaForSequenceClassification""", """XLMRobertaForTokenClassification""", """XLMRobertaModel""", """XLMRobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMRobertaForCausalLM""", """TFXLMRobertaForMaskedLM""", """TFXLMRobertaForMultipleChoice""", """TFXLMRobertaForQuestionAnswering""", """TFXLMRobertaForSequenceClassification""", """TFXLMRobertaForTokenClassification""", """TFXLMRobertaModel""", """TFXLMRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxXLMRobertaForMaskedLM""", """FlaxXLMRobertaForCausalLM""", 
"""FlaxXLMRobertaForMultipleChoice""", """FlaxXLMRobertaForQuestionAnswering""", """FlaxXLMRobertaForSequenceClassification""", """FlaxXLMRobertaForTokenClassification""", """FlaxXLMRobertaModel""", """FlaxXLMRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, 
FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
234
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = {} snake_case_ = tokenizer(example['content'] , truncation=UpperCamelCase__ )['input_ids'] snake_case_ = len(example['content'] ) / len(output['input_ids'] ) return output _UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) _UpperCAmelCase : List[Any] = parser.parse_args() if args.num_workers is None: _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count() _UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir) _UpperCAmelCase : Optional[int] = time.time() _UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Dict = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
0
def a ( A__ : List[str] ) -> List[Any]: """simple docstring""" _lowercase =len(UpperCamelCase__ ) while cur > 1: # Find the maximum number in arr _lowercase =arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _lowercase =arr[mi::-1] + arr[mi + 1 : len(UpperCamelCase__ )] # Reverse whole list _lowercase =arr[cur - 1 :: -1] + arr[cur : len(UpperCamelCase__ )] cur -= 1 return arr if __name__ == "__main__": lowercase_ = input('Enter numbers separated by a comma:\n').strip() lowercase_ = [int(item) for item in user_input.split(',')] print(pancake_sort(unsorted))
205
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
0
from __future__ import annotations from typing import Any class lowercase__ : def __init__( self , __UpperCAmelCase = 6 )-> Tuple: '''simple docstring''' lowerCAmelCase__ = None lowerCAmelCase__ = None self.create_linked_list(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase )-> int: '''simple docstring''' lowerCAmelCase__ = Node() lowerCAmelCase__ = current_node lowerCAmelCase__ = current_node lowerCAmelCase__ = current_node for _ in range(1 , __UpperCAmelCase ): lowerCAmelCase__ = Node() lowerCAmelCase__ = current_node lowerCAmelCase__ = previous_node lowerCAmelCase__ = current_node lowerCAmelCase__ = self.front lowerCAmelCase__ = previous_node def UpperCAmelCase ( self )-> int: '''simple docstring''' return ( self.front == self.rear and self.front is not None and self.front.data is None ) def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' self.check_can_perform_operation() return self.front.data if self.front else None def UpperCAmelCase ( self , __UpperCAmelCase )-> str: '''simple docstring''' if self.rear is None: return self.check_is_full() if not self.is_empty(): lowerCAmelCase__ = self.rear.next if self.rear: lowerCAmelCase__ = data def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: lowerCAmelCase__ = self.front.data lowerCAmelCase__ = None return data lowerCAmelCase__ = self.front lowerCAmelCase__ = old_front.next lowerCAmelCase__ = old_front.data lowerCAmelCase__ = None return data def UpperCAmelCase ( self )-> Dict: '''simple docstring''' if self.is_empty(): raise Exception("Empty Queue" ) def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' if self.rear and self.rear.next == self.front: raise Exception("Full Queue" ) class lowercase__ : def __init__( self )-> List[Any]: '''simple docstring''' lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if __name__ == "__main__": 
import doctest doctest.testmod()
340
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): 
snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) 
self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = 
ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : Tuple = { """google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""", } class a (lowercase_ , lowercase_ ): """simple docstring""" __UpperCAmelCase : List[str] = '''bit''' __UpperCAmelCase : Dict = ['''preactivation''', '''bottleneck'''] __UpperCAmelCase : Any = ['''SAME''', '''VALID'''] def __init__( self : Optional[Any] , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : Any=64 , lowerCamelCase : Union[str, Any]=[256, 512, 1024, 2048] , lowerCamelCase : int=[3, 4, 6, 3] , lowerCamelCase : Tuple="preactivation" , lowerCamelCase : List[Any]="relu" , lowerCamelCase : Any=None , lowerCamelCase : str=32 , lowerCamelCase : List[Any]=0.0 , lowerCamelCase : List[Any]=False , lowerCamelCase : str=32 , lowerCamelCase : str=1 , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=None , **lowerCamelCase : str , ) -> Union[str, Any]: super().__init__(**lowerCamelCase ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: __snake_case : Any = global_padding.upper() else: raise ValueError(F'Padding strategy {global_padding} not supported' ) __snake_case : Any = num_channels __snake_case : Union[str, Any] = embedding_size __snake_case : Optional[Any] = hidden_sizes __snake_case : int = depths __snake_case : Dict = layer_type __snake_case : Tuple = hidden_act __snake_case : Any = global_padding __snake_case : str = num_groups __snake_case : Optional[Any] = drop_path_rate __snake_case : Any = embedding_dynamic_padding __snake_case : Optional[int] = output_stride __snake_case : Dict = width_factor __snake_case : List[str] = ["stem"] 
+ [F'stage{idx}' for idx in range(1 , len(lowerCamelCase ) + 1 )] __snake_case , __snake_case : int = get_aligned_output_features_output_indices( out_features=lowerCamelCase , out_indices=lowerCamelCase , stage_names=self.stage_names )
123
from abc import ABC, abstractmethod
from argparse import ArgumentParser


# NOTE(review): the class/method names look machine-mangled (upstream this is a
# CLI-command base class with `register_subcommand` and `run`); names kept so
# existing subclasses keep working. Only the undefined base class is fixed.
class lowercase(ABC):
    """Abstract base class for CLI subcommands.

    Subclasses implement :meth:`a` to execute the command. Instantiating this
    class directly raises ``TypeError`` because of the abstract method.
    """

    @staticmethod
    @abstractmethod
    def a(snake_case):
        """Register this command's sub-parser on ``snake_case`` (an ``ArgumentParser``).

        NOTE(review): this def is shadowed by the instance method of the same
        (mangled) name below, so only the instance method is part of the class;
        kept for fidelity to the original text.
        """
        raise NotImplementedError()

    @abstractmethod
    def a(self):
        """Execute the command; must be overridden by concrete subclasses."""
        raise NotImplementedError()
285
0
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowercase__ : def __init__( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple=13 , snake_case__ : str=30 , snake_case__ : Tuple=2 , snake_case__ : List[Any]=3 , snake_case__ : List[Any]=True , snake_case__ : List[str]=True , snake_case__ : List[Any]=32 , snake_case__ : Tuple=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Dict=37 , snake_case__ : List[Any]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=0.1 , snake_case__ : str=10 , snake_case__ : List[Any]=0.02 , snake_case__ : str=3 , snake_case__ : Optional[int]=None , snake_case__ : Tuple=2 , ): lowerCamelCase_ : Union[str, Any] =parent lowerCamelCase_ : List[Any] =batch_size lowerCamelCase_ : List[Any] =image_size lowerCamelCase_ : List[Any] =patch_size lowerCamelCase_ : List[str] =num_channels lowerCamelCase_ : List[Any] =is_training lowerCamelCase_ : int =use_labels lowerCamelCase_ : Any =hidden_size lowerCamelCase_ : int =num_hidden_layers 
lowerCamelCase_ : str =num_attention_heads lowerCamelCase_ : str =intermediate_size lowerCamelCase_ : Optional[Any] =hidden_act lowerCamelCase_ : Optional[Any] =hidden_dropout_prob lowerCamelCase_ : int =attention_probs_dropout_prob lowerCamelCase_ : Any =type_sequence_label_size lowerCamelCase_ : Optional[Any] =initializer_range lowerCamelCase_ : int =scope lowerCamelCase_ : Dict =encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase_ : Any =(image_size // patch_size) ** 2 lowerCamelCase_ : Dict =num_patches + 2 def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ : str =None if self.use_labels: lowerCamelCase_ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ : Dict =self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Optional[int] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): lowerCamelCase_ : str =DeiTModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowerCamelCase_ : Optional[Any] =model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : str , snake_case__ : Dict , snake_case__ : 
str , snake_case__ : Union[str, Any] ): lowerCamelCase_ : str =DeiTForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowerCamelCase_ : Any =model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ : str =1 lowerCamelCase_ : Tuple =DeiTForMaskedImageModeling(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCamelCase_ : int =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ : Optional[Any] =model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Tuple ): lowerCamelCase_ : Dict =self.type_sequence_label_size lowerCamelCase_ : Tuple =DeiTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCamelCase_ : Any =model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ : List[Any] =1 lowerCamelCase_ : int =DeiTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCamelCase_ : List[Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ : int =model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : List[Any] =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) : Dict =config_and_inputs lowerCamelCase_ : Any ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowercase__ ( lowercase_, lowercase_, unittest.TestCase ): _UpperCAmelCase :List[str] = ( ( DeiTModel, 
DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[int] = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _UpperCAmelCase :Dict = False _UpperCAmelCase :int = False _UpperCAmelCase :List[Any] = False def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : Optional[int] =DeiTModelTester(self ) lowerCamelCase_ : Dict =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def UpperCAmelCase__ ( self : Dict ): pass def UpperCAmelCase__ ( self : List[Any] ): lowerCamelCase_ , lowerCamelCase_ : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : str =model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ : Any =model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ , lowerCamelCase_ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Union[str, Any] =model_class(snake_case__ ) lowerCamelCase_ : Optional[Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ : Optional[Any] =[*signature.parameters.keys()] lowerCamelCase_ : Optional[Any] =["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase__ ( 
self : Optional[int] ): lowerCamelCase_ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[str]=False ): lowerCamelCase_ : Tuple =super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase__ ( self : Tuple ): if not self.model_tester.is_training: return lowerCamelCase_ , lowerCamelCase_ : int =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Any =True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(snake_case__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowerCamelCase_ : Optional[Any] =model_class(snake_case__ ) model.to(snake_case__ ) model.train() lowerCamelCase_ : str =self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) lowerCamelCase_ : Optional[Any] =model(**snake_case__ ).loss loss.backward() def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ , lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCamelCase_ : List[Any] =False lowerCamelCase_ : int =True for model_class in self.all_model_classes: if model_class in get_values(snake_case__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue 
lowerCamelCase_ : Optional[Any] =model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() lowerCamelCase_ : Optional[int] =self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) lowerCamelCase_ : Tuple =model(**snake_case__ ).loss loss.backward() def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ , lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Union[str, Any] =[ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(snake_case__ ), *get_values(snake_case__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ): lowerCamelCase_ : Optional[int] =problem_type["title"] lowerCamelCase_ : List[Any] =problem_type["num_labels"] lowerCamelCase_ : Union[str, Any] =model_class(snake_case__ ) model.to(snake_case__ ) model.train() lowerCamelCase_ : List[str] =self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if problem_type["num_labels"] > 1: lowerCamelCase_ : List[str] =inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) lowerCamelCase_ : Optional[Any] =inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=snake_case__ ) as warning_list: lowerCamelCase_ : Optional[int] =model(**snake_case__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def UpperCAmelCase__ ( self : List[Any] ): for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ : str =DeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _snake_case ( ) -> Any: lowerCamelCase_ : Optional[int] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : List[str] ): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : Optional[int] =DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( snake_case__ ) lowerCamelCase_ : Any =self.default_image_processor lowerCamelCase_ : List[str] =prepare_img() lowerCamelCase_ : List[Any] =image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): lowerCamelCase_ : Any =model(**snake_case__ ) # verify the logits lowerCamelCase_ : List[str] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowerCamelCase_ : int =torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : Optional[int] =DeiTModel.from_pretrained( 
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) lowerCamelCase_ : str =self.default_image_processor lowerCamelCase_ : Optional[int] =prepare_img() lowerCamelCase_ : Tuple =image_processor(images=snake_case__ , return_tensors="pt" ) lowerCamelCase_ : Tuple =inputs.pixel_values.to(snake_case__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowerCamelCase_ : Optional[int] =model(snake_case__ )
144
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger() @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : list = field(default_factory=lowercase_ ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(snake_case , nn.Convad ) or isinstance(snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self , snake_case ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def a ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) def __call__( self , snake_case ): snake_case_ = Tracker(self.dest )(snake_case ).parametrized snake_case_ = Tracker(self.src )(snake_case ).parametrized snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip , snake_case ) ) snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip , snake_case ) ) if len(snake_case ) != len(snake_case ): raise 
Exception( F'''Numbers of operations are different. Source module has {len(snake_case )} operations while''' F''' destination module has {len(snake_case )}.''' ) for dest_m, src_m in zip(snake_case , snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() snake_case_ = ResNetForImageClassification(UpperCamelCase__ ).eval() snake_case_ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." snake_case_ = F'''resnet{"-".join(name.split("resnet" ) )}''' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) print(F'''Pushed {checkpoint_name}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ): '''simple docstring''' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} snake_case_ 
= idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. 
If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : Optional[Any] = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger() @dataclass class a__ : """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = field(default_factory=lowercase_ ) __lowerCamelCase = field(default_factory=lowercase_ ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' A__ = len(list(m.modules() ) ) == 1 or isinstance(lowercase , nn.Convad ) or isinstance(lowercase , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowercase ) def __call__( self , lowercase ) -> Tuple: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowercase ) [x.remove() for x in self.handles] return self @property def UpperCamelCase ( self ) -> Any: '''simple docstring''' return list(filter(lambda lowercase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class a__ : """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 0 __lowerCamelCase = field(default_factory=lowercase_ ) __lowerCamelCase = field(default_factory=lowercase_ ) def __call__( self , lowercase ) -> int: '''simple docstring''' A__ = Tracker(self.dest )(lowercase ).parametrized A__ = Tracker(self.src )(lowercase ).parametrized A__ = list(filter(lambda lowercase : type(lowercase ) not in self.src_skip , lowercase ) ) A__ = list(filter(lambda lowercase : type(lowercase ) not in self.dest_skip , lowercase ) ) if len(lowercase ) != len(lowercase ): raise Exception( F'Numbers of operations are different. 
Source module has {len(lowercase )} operations while' F' destination module has {len(lowercase )}.' ) for dest_m, src_m in zip(lowercase , lowercase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'Transfered from={src_m} to={dest_m}' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Any = True ) -> Dict: '''simple docstring''' print(F'Converting {name}...' ) with torch.no_grad(): A__ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() A__ = ResNetForImageClassification(UpperCamelCase__ ).eval() A__ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) A__ = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." A__ = F'resnet{"-".join(name.split("resnet" ) )}' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one A__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=UpperCamelCase__ , ) print(F'Pushed {checkpoint_name}' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Optional[Any] = None , SCREAMING_SNAKE_CASE_: Any = True ) -> str: '''simple docstring''' A__ = "imagenet-1k-id2label.json" A__ = 1_0_0_0 A__ = (1, num_labels) A__ = "huggingface/label-files" A__ = num_labels A__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) ) A__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ 
= partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) A__ = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. 
If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
68
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

# Benchmark sizes: full-speed run and a smaller run for expensive formats.
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Sequentially read ``length`` single examples."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Read the whole dataset in slices of ``batch_size``.

    NOTE(review): ``length`` is accepted but unused (the loop runs over
    ``len(dataset)``); kept for signature compatibility with the other readers.
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, type, length):
    """Read ``length`` single examples with output format ``type`` applied."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    """Read ``length`` examples in batches with output format ``type`` applied."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time all read patterns, and dump JSON results."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # Record the timing (the corrupted original discarded it into a
            # throwaway local, so the JSON dump held only the example count).
            key = func.__name__ + " " + " ".join(str(v) for v in kwargs.values())
            times[key] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            key = "shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())
            times[key] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
285
0
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

# Package version (the corrupted text bound this to a throwaway name with a
# wrong `int` annotation).
__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version

# Hard runtime requirements are checked eagerly at import time so users get a
# clear message instead of obscure downstream failures.
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

# Keep the module namespace clean: these were only needed for the checks above.
del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Re-expose moved symbols on their historical modules so old import paths such
# as `datasets.arrow_dataset.concatenate_datasets` and `datasets.utils.Download*`
# keep working. (The corrupted text rebound one local name repeatedly here,
# never writing the attributes, which made the aliases — and the `del` below —
# pointless.)
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
281
def mf_knapsack(i, wt, val, j):
    """Top-down (memoized) 0/1 knapsack.

    Considers the first ``i`` items with weights ``wt`` and values ``val``
    and remaining capacity ``j``.  Results are memoized in the module-level
    table ``f`` (must be pre-initialized with ``-1`` for uncomputed cells,
    as done in the ``__main__`` section below).
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        # Store the result in the memo table; without this write every call
        # would fall through and return the -1 sentinel.
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    :param w: total capacity
    :param wt: per-item weights
    :param val: per-item values
    :param n: number of items
    :return: tuple ``(optimal value, dp table)`` where ``dp[i][c]`` is the
        best value using the first ``i`` items within capacity ``c``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with `w` (not the loop variable) so `w == 0` does not raise.
    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack problem and also reconstruct one optimal subset.

    :param w: total capacity
    :param wt: list/tuple of integer item weights
    :param val: list/tuple of item values, same length as ``wt``
    :return: ``(optimal value, set of 1-based indices of chosen items)``
    :raises ValueError: if ``wt``/``val`` are not sequences or differ in length
    :raises TypeError: if any weight is not an integer
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walk the dp table backwards, adding each chosen item's 1-based index
    to ``optimal_set``.

    An item ``i`` was taken exactly when ``dp[i][j] != dp[i-1][j]``.
    """
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
285
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class UpperCAmelCase_ ( lowercase_): snake_case__ = '''unispeech''' def __init__( self : Optional[int] , __UpperCamelCase : Dict=32 , __UpperCamelCase : Optional[int]=768 , __UpperCamelCase : Union[str, Any]=12 , __UpperCamelCase : List[Any]=12 , __UpperCamelCase : str=3072 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Union[str, Any]=0.0_2 , __UpperCamelCase : Optional[int]=1E-5 , __UpperCamelCase : Dict="group" , __UpperCamelCase : int="gelu" , __UpperCamelCase : Any=(512, 512, 512, 512, 512, 512, 512) , __UpperCamelCase : Tuple=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase : List[str]=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : List[str]=128 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=0.0_5 , __UpperCamelCase : Dict=10 , __UpperCamelCase : str=2 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Optional[int]=10 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : Optional[Any]=320 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : List[str]=100 , __UpperCamelCase : Union[str, Any]=256 , __UpperCamelCase : Tuple=256 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[int]="mean" , __UpperCamelCase : str=False , __UpperCamelCase : 
Optional[Any]=False , __UpperCamelCase : List[Any]=256 , __UpperCamelCase : List[str]=80 , __UpperCamelCase : Any=0 , __UpperCamelCase : List[Any]=1 , __UpperCamelCase : int=2 , __UpperCamelCase : Optional[int]=0.5 , **__UpperCamelCase : Optional[Any] , ) -> Optional[Any]: super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase ) _UpperCamelCase = hidden_size _UpperCamelCase = feat_extract_norm _UpperCamelCase = feat_extract_activation _UpperCamelCase = list(__UpperCamelCase ) _UpperCamelCase = list(__UpperCamelCase ) _UpperCamelCase = list(__UpperCamelCase ) _UpperCamelCase = conv_bias _UpperCamelCase = num_conv_pos_embeddings _UpperCamelCase = num_conv_pos_embedding_groups _UpperCamelCase = len(self.conv_dim ) _UpperCamelCase = num_hidden_layers _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = num_attention_heads _UpperCamelCase = hidden_dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = feat_proj_dropout _UpperCamelCase = final_dropout _UpperCamelCase = layerdrop _UpperCamelCase = layer_norm_eps _UpperCamelCase = initializer_range _UpperCamelCase = num_ctc_classes _UpperCamelCase = vocab_size _UpperCamelCase = do_stable_layer_norm _UpperCamelCase = use_weighted_layer_sum _UpperCamelCase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCamelCase = apply_spec_augment _UpperCamelCase = mask_time_prob _UpperCamelCase = mask_time_length _UpperCamelCase = mask_time_min_masks _UpperCamelCase = mask_feature_prob _UpperCamelCase = mask_feature_length _UpperCamelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _UpperCamelCase = num_codevectors_per_group _UpperCamelCase = num_codevector_groups _UpperCamelCase = contrastive_logits_temperature _UpperCamelCase = feat_quantizer_dropout _UpperCamelCase = num_negatives _UpperCamelCase = codevector_dim _UpperCamelCase = proj_codevector_dim _UpperCamelCase = diversity_loss_weight # ctc loss _UpperCamelCase = ctc_loss_reduction _UpperCamelCase = ctc_zero_infinity # pretraining loss _UpperCamelCase = replace_prob @property def _UpperCamelCase ( self : Optional[Any] ) -> str: return functools.reduce(operator.mul , self.conv_stride , 1 )
256
"""Feature extractor class for CLAP audio inputs."""
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel spectrogram features for CLAP.

    Audio longer than ``max_length`` is either randomly cropped
    (``truncation="rand_trunc"``) or turned into a 4-channel "fusion" of a
    shrunk global mel plus three random chunks (``truncation="fusion"``);
    shorter audio is repeated and/or zero-padded.
    """

    # Names of the tensors produced by __call__ (read by the base class).
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of positive-frequency bins of an rFFT of this window size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two filter banks: HTK-scaled (used for "fusion") and Slaney-scaled
        # (used for the non-fusion paths).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this instance, dropping the (large, derivable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute a (frames, n_mels) log-mel (dB) spectrogram of ``waveform``."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel fusion input: a bilinear-shrunk global mel plus
        three ``chunk_frames``-long chunks sampled from the front/middle/back
        thirds of the spectrogram."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate/pad a single waveform and convert it to mel features.

        Returns ``(input_mel, longer)`` where ``longer`` flags that the audio
        exceeded ``max_length`` and the fusion path was taken.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a BatchFeature
        with ``input_features`` and per-example ``is_longer`` flags."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        # NOTE(review): float64 matches the original CLAP implementation's
        # double-precision mel pipeline — confirm against upstream if changed.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features


# Backwards-compatible alias for the obfuscated public name previously
# exported by this module.
lowercase = ClapFeatureExtractor
285
0
"""Project Euler Problem 8: https://projecteuler.net/problem=8

Find the thirteen adjacent digits in the 1000-digit number N that have the
greatest product.
"""
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``.

    Slides a 13-character window over the digit string and multiplies the
    digits of each window.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(F"{solution() = }")
200
"""Remove duplicate initializer tensors from an ONNX model file.

Duplicate initializers are detected by value (names ignored), the later copy
is dropped, and every node input referencing it is rewired to the surviving
copy.  The optimized model is saved next to the input as ``optimized_<name>``.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name

    # Blank the names so protobuf equality compares only the payload,
    # then restore them.
    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called ``name`` on ``node_proto`` with ``new_name``,
    recursing into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # RepeatedScalarContainer has no item assignment; emulate it
            # with insert + pop of the old value.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of ``graph_proto``."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """For each ``(i, ref_i)`` pair, drop initializer ``i`` from
    ``model_without_ext`` and rewire its users to initializer ``ref_i``.

    ``ind_to_replace`` must be sorted so that ``i > ref_i`` for every pair.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate initializers of the ONNX model at ``onnx_file_path``.

    Saves the optimized model as ``optimized_<basename>`` in the same folder
    and returns that new file name.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    # O(n^2) pairwise scan; fine for the initializer counts of typical models.
    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Estimate saved bytes from dtype (1=float32, 6=int32,
                # 7=int64, 11=double — per onnx.TensorProto.DataType).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_proto_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_proto_path)

    return optimized_model_file_name
285
0