code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowercase__ :Optional[int] = logging.get_logger(__name__) lowercase__ :List[str] = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Optional[Any] ='''bloom''' lowercase_ : List[str] =['''past_key_values'''] lowercase_ : Tuple ={ '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self ,A__=2_5_0_8_8_0 ,A__=6_4 ,A__=2 ,A__=8 ,A__=1E-5 ,A__=0.02 ,A__=True ,A__=1 ,A__=2 ,A__=False ,A__=0.0 ,A__=0.0 ,A__=1 ,A__=False ,**A__ ,): lowercase = vocab_size # Backward compatibility with n_embed kwarg lowercase = kwargs.pop('''n_embed''' ,A__) lowercase = hidden_size if n_embed is None else n_embed lowercase = n_layer lowercase = n_head lowercase = layer_norm_epsilon lowercase = initializer_range lowercase = use_cache lowercase = pretraining_tp lowercase = apply_residual_connection_post_layernorm lowercase = hidden_dropout lowercase = attention_dropout lowercase = bos_token_id lowercase = eos_token_id lowercase = slow_but_exact super().__init__(bos_token_id=A__ ,eos_token_id=A__ ,**A__) class lowercase ( SCREAMING_SNAKE_CASE__ ): 
lowercase_ : List[str] =version.parse('''1.12''' ) def __init__( self ,A__ ,A__ = "default" ,A__ = None ,A__ = False ,): super().__init__(A__ ,task=A__ ,patching_specs=A__ ,use_past=A__) if not getattr(self._config ,'''pad_token_id''' ,A__): # TODO: how to do that better? lowercase = 0 @property def A__ ( self): lowercase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}}) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(A__ ,direction='''inputs''' ,inverted_values_shape=A__) lowercase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def A__ ( self): return self._config.n_layer @property def A__ ( self): return self._config.n_head @property def A__ ( self): return 1E-3 def A__ ( self ,A__ ,A__ = -1 ,A__ = -1 ,A__ = False ,A__ = None ,): lowercase = super(A__ ,self).generate_dummy_inputs( A__ ,batch_size=A__ ,seq_length=A__ ,is_pair=A__ ,framework=A__) # We need to order the input in the way they appears in the forward() lowercase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch lowercase , lowercase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase = seqlen + 2 lowercase = self._config.hidden_size // self.num_attention_heads lowercase = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase = [ (torch.zeros(A__), torch.zeros(A__)) for _ in range(self.num_layers) ] lowercase = common_inputs['''attention_mask'''] if self.use_past: lowercase = ordered_inputs['''attention_mask'''].dtype 
lowercase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(A__ ,A__ ,dtype=A__)] ,dim=1) return ordered_inputs @property def A__ ( self): return 1_3
101
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _A = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ["pixel_values"] def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ) -> None: super().__init__(**A_ ) __UpperCamelCase =size if size is not None else {'shortest_edge': 224} __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ ) __UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224} __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' ) __UpperCamelCase =do_resize __UpperCamelCase =size __UpperCamelCase =resample __UpperCamelCase =do_center_crop __UpperCamelCase =crop_size __UpperCamelCase =do_rescale __UpperCamelCase =rescale_factor __UpperCamelCase =do_normalize __UpperCamelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCamelCase =image_std if image_std is not None else OPENAI_CLIP_STD __UpperCamelCase =do_convert_rgb def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray: __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}' ) __UpperCamelCase =get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray: __UpperCamelCase =get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' ) return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ ) def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]: return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray: return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image: __UpperCamelCase =do_resize if do_resize is not None else self.do_resize __UpperCamelCase =size if size is not None else self.size __UpperCamelCase =get_size_dict(A_ , param_name='size' , default_to_square=A_ ) __UpperCamelCase =resample if resample is not None else self.resample __UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase =crop_size if crop_size is not None else self.crop_size __UpperCamelCase =get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ ) __UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase =image_mean if image_mean is not None else self.image_mean __UpperCamelCase =image_std if image_std is not None else self.image_std __UpperCamelCase =do_convert_rgb if do_convert_rgb is not None else 
self.do_convert_rgb __UpperCamelCase =make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCamelCase =[convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. __UpperCamelCase =[to_numpy_array(A_ ) for image in images] if do_resize: __UpperCamelCase =[self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: __UpperCamelCase =[self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: __UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: __UpperCamelCase =[self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] __UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images] __UpperCamelCase ={'pixel_values': images} return BatchFeature(data=A_ , tensor_type=A_ )
62
0
"""simple docstring""" from PIL import Image def lowercase ( _snake_case : Image ) ->Image: """simple docstring""" __snake_case , __snake_case : Any = image.size __snake_case : Dict = 0 __snake_case : Tuple = image.load() for i in range(_snake_case ): for j in range(_snake_case ): __snake_case : int = pixels[j, i] mean += pixel mean //= width * height for j in range(_snake_case ): for i in range(_snake_case ): __snake_case : Tuple = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = mean_threshold(Image.open("""path_to_image""").convert("""L""")) image.save("""output_image_path""")
102
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "yolos" def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-12 , A_=[512, 864] , A_=16 , A_=3 , A_=True , A_=100 , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=5 , A_=2 , A_=0.1 , **A_ , ) -> Any: super().__init__(**A_ ) __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_act __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =initializer_range __UpperCamelCase =layer_norm_eps __UpperCamelCase =image_size __UpperCamelCase =patch_size __UpperCamelCase =num_channels __UpperCamelCase =qkv_bias __UpperCamelCase =num_detection_tokens __UpperCamelCase =use_mid_position_embeddings __UpperCamelCase =auxiliary_loss # Hungarian matcher __UpperCamelCase =class_cost __UpperCamelCase =bbox_cost __UpperCamelCase =giou_cost # Loss coefficients __UpperCamelCase =bbox_loss_coefficient __UpperCamelCase =giou_loss_coefficient __UpperCamelCase =eos_coefficient class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : str = version.parse("1.11" ) @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _a ( self ) -> float: return 1E-4 @property def _a ( self ) -> int: return 12
62
0
from ..utils import DummyObject, requires_backends class __snake_case ( metaclass=UpperCamelCase_ ): _a = ['''onnx'''] def __init__( self : str , *A_ : Dict , **A_ : Union[str, Any]): requires_backends(self , ['''onnx''']) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *A_ : List[str] , **A_ : Optional[Any]): requires_backends(cls , ['''onnx''']) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *A_ : Dict , **A_ : List[str]): requires_backends(cls , ['''onnx'''])
103
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _A = { 'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ['VivitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'VivitModel', 'VivitPreTrainedModel', 'VivitForVideoClassification', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ = { '''vocab_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''', }, '''merges_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''Salesforce/codegen-350M-mono''': ( '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase__ = { '''Salesforce/codegen-350M-mono''': 2048, } class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = ['input_ids', 'attention_mask'] SCREAMING_SNAKE_CASE : Tuple = CodeGenTokenizer def __init__( self : List[str] ,lowercase__ : Tuple=None ,lowercase__ : int=None ,lowercase__ : int=None ,lowercase__ : Dict="<|endoftext|>" ,lowercase__ : List[str]="<|endoftext|>" ,lowercase__ : Union[str, Any]="<|endoftext|>" ,lowercase__ : List[str]=False ,**lowercase__ : Optional[int] ,): super().__init__( lowercase__ ,lowercase__ ,tokenizer_file=lowercase__ ,unk_token=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ 
,add_prefix_space=lowercase__ ,**lowercase__ ,) if kwargs.pop('''add_bos_token''' ,lowercase__ ): __lowercase = kwargs.pop('''name_or_path''' ,'''''' ) raise ValueError( '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.''' '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n''' F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.''' ''' so that the fast tokenizer works correctly.''' ) __lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' ,lowercase__ ) != add_prefix_space: __lowercase = getattr(lowercase__ ,pre_tok_state.pop('''type''' ) ) __lowercase = add_prefix_space __lowercase = pre_tok_class(**lowercase__ ) __lowercase = add_prefix_space def SCREAMING_SNAKE_CASE ( self : List[Any] ,*lowercase__ : int ,**lowercase__ : Dict ): __lowercase = kwargs.get('''is_split_into_words''' ,lowercase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase__ ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ,*lowercase__ : Union[str, Any] ,**lowercase__ : List[Any] ): __lowercase = kwargs.get('''is_split_into_words''' ,lowercase__ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowercase__ ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Optional[str] = None ): __lowercase = self._tokenizer.model.save(lowercase__ ,name=lowercase__ ) return tuple(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] ,lowercase__ : bool = False ,lowercase__ : bool = None ,lowercase__ : Optional[List[str]] = None ,**lowercase__ : int ,): __lowercase = super().decode( token_ids=lowercase__ ,skip_special_tokens=lowercase__ ,clean_up_tokenization_spaces=lowercase__ ,**lowercase__ ,) if truncate_before_pattern is not None and len(lowercase__ ) > 0: __lowercase = self.truncate(lowercase__ ,lowercase__ ) return decoded_text def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any] ,lowercase__ : int ): def find_re(lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : int ): __lowercase = pattern.search(lowercase__ ,lowercase__ ) return m.start() if m else -1 __lowercase = [re.compile(lowercase__ ,re.MULTILINE ) for pattern in truncate_before_pattern] __lowercase = list(re.finditer('''^print''' ,lowercase__ ,re.MULTILINE ) ) if len(lowercase__ ) > 1: __lowercase = completion[: prints[1].start()] __lowercase = list(re.finditer('''^def''' ,lowercase__ ,re.MULTILINE ) ) if len(lowercase__ ) > 1: __lowercase = completion[: defs[1].start()] __lowercase = 0 __lowercase = [ pos for pos in [find_re(lowercase__ ,lowercase__ ,lowercase__ ) for terminal in terminals] if pos != -1 ] if len(lowercase__ ) > 0: return completion[: min(lowercase__ )] else: return completion
104
from __future__ import annotations import math class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ ) -> None: __UpperCamelCase =size # approximate the overall size of segment tree with given value __UpperCamelCase =[0 for i in range(0 , 4 * size )] # create array to store lazy update __UpperCamelCase =[0 for i in range(0 , 4 * size )] __UpperCamelCase =[0 for i in range(0 , 4 * size )] # flag for lazy update def _a ( self , A_ ) -> int: return idx * 2 def _a ( self , A_ ) -> int: return idx * 2 + 1 def _a ( self , A_ , A_ , A_ , A_ ) -> None: if left_element == right_element: __UpperCamelCase =a[left_element - 1] else: __UpperCamelCase =(left_element + right_element) // 2 self.build(self.left(A_ ) , A_ , A_ , A_ ) self.build(self.right(A_ ) , mid + 1 , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> bool: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __UpperCamelCase =val if left_element != right_element: __UpperCamelCase =val __UpperCamelCase =val __UpperCamelCase =True __UpperCamelCase =True return True __UpperCamelCase =(left_element + right_element) // 2 self.update(self.left(A_ ) , A_ , A_ , A_ , A_ , A_ ) self.update(self.right(A_ ) , mid + 1 , A_ , A_ , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) return True def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int | float: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if 
right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __UpperCamelCase =(left_element + right_element) // 2 __UpperCamelCase =self.query(self.left(A_ ) , A_ , A_ , A_ , A_ ) __UpperCamelCase =self.query(self.right(A_ ) , mid + 1 , A_ , A_ , A_ ) return max(A_ , A_ ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A_ , A_ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _A = 15 _A = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
62
0
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer a : List[str] = logging.get_logger(__name__) a : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} a : str = { '''vocab_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json''' }, '''merges_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt''' }, } a : Tuple = {'''allegro/herbert-base-cased''': 514} a : Optional[int] = {} class __UpperCamelCase ( a__ ): lowerCamelCase : str =VOCAB_FILES_NAMES lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Dict =PRETRAINED_INIT_CONFIGURATION lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] =HerbertTokenizer def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__="</s>" , **lowerCAmelCase__ , ) -> Optional[int]: super().__init__( lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , **lowerCAmelCase__ , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: a : Optional[Any] = [self.cls_token_id] a : Any = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( 
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: a : Dict = [self.sep_token_id] a : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]: a : List[str] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ )
105
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ): __UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250' __UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' ) __UpperCamelCase =soup.find_all('td' , attrs='titleColumn' ) __UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) } def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ): __UpperCamelCase =get_imdb_top_aaa_movies() with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file: __UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ ) writer.writerow(['Movie title', 'IMDb rating'] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
0
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self : int ,lowercase_ : Tuple ,lowercase_ : int=3 ,lowercase_ : Dict=3_2 ,lowercase_ : Optional[int]=3 ,lowercase_ : Tuple=1_0 ,lowercase_ : Tuple=[1_0, 2_0, 3_0, 4_0] ,lowercase_ : List[str]=[1, 1, 2, 1] ,lowercase_ : Dict=True ,lowercase_ : str=True ,lowercase_ : List[Any]="relu" ,lowercase_ : List[str]=3 ,lowercase_ : int=None ,): lowerCAmelCase__ : Tuple = parent lowerCAmelCase__ : Tuple = batch_size lowerCAmelCase__ : str = image_size lowerCAmelCase__ : Optional[Any] = num_channels lowerCAmelCase__ : Tuple = embeddings_size lowerCAmelCase__ : Any = hidden_sizes lowerCAmelCase__ : Any = depths lowerCAmelCase__ : List[Any] = is_training lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : List[str] = hidden_act lowerCAmelCase__ : Any = num_labels lowerCAmelCase__ : Optional[int] = scope lowerCAmelCase__ : Union[str, Any] = len(lowercase_ ) def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : List[Any] = self.get_config() return config, pixel_values def __lowerCAmelCase ( self : Any ): return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes 
,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def __lowerCAmelCase ( self : List[str] ,lowercase_ : Dict ,lowercase_ : Tuple ): lowerCAmelCase__ : List[Any] = FlaxRegNetModel(config=lowercase_ ) lowerCAmelCase__ : List[str] = model(lowercase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,) def __lowerCAmelCase ( self : Any ,lowercase_ : int ,lowercase_ : Dict ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : int = FlaxRegNetForImageClassification(config=lowercase_ ) lowerCAmelCase__ : Tuple = model(lowercase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = config_and_inputs lowerCAmelCase__ : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ): """simple docstring""" lowercase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : Any = FlaxRegNetModelTester(self ) lowerCAmelCase__ : Tuple = ConfigTester(self ,config_class=lowercase_ ,has_text_modality=lowercase_ ) def __lowerCAmelCase ( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCAmelCase ( 
self : int ): return def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def __lowerCAmelCase ( self : Any ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __lowerCAmelCase ( self : Any ): pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __lowerCAmelCase ( self : Optional[int] ): pass def __lowerCAmelCase ( self : Any ): lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : List[Any] = model_class(lowercase_ ) lowerCAmelCase__ : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Tuple = [*signature.parameters.keys()] lowerCAmelCase__ : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,lowercase_ ) def __lowerCAmelCase ( self : Dict ): def check_hidden_states_output(lowercase_ : Union[str, Any] ,lowercase_ : Tuple ,lowercase_ : int ): lowerCAmelCase__ : List[str] = model_class(lowercase_ ) lowerCAmelCase__ : int = model(**self._prepare_for_class(lowercase_ ,lowercase_ ) ) lowerCAmelCase__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase__ : int = self.model_tester.num_stages self.assertEqual(len(lowercase_ ) ,expected_num_stages + 1 ) lowerCAmelCase__ ,lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = True check_hidden_states_output(lowercase_ ,lowercase_ ,lowercase_ ) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] lowerCAmelCase__ : List[str] = True check_hidden_states_output(lowercase_ ,lowercase_ ,lowercase_ ) def __lowerCAmelCase ( self : Any ): lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ : Any = self._prepare_for_class(lowercase_ ,lowercase_ ) lowerCAmelCase__ : Union[str, Any] = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ : str ,**lowercase_ : Tuple ): return model(pixel_values=lowercase_ ,**lowercase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase__ : int = model_jitted(**lowercase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase__ : Tuple = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) ,len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ ,lowercase_ ): self.assertEqual(jitted_output.shape ,output.shape ) def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __lowerCAmelCase ( self : List[str] ): return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def __lowerCAmelCase ( self : str ): lowerCAmelCase__ : Tuple = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) lowerCAmelCase__ : Optional[int] = self.default_image_processor lowerCAmelCase__ : List[Any] = prepare_img() lowerCAmelCase__ : List[Any] = image_processor(images=lowercase_ ,return_tensors='''np''' ) lowerCAmelCase__ : int = model(**lowercase_ ) # verify the logits lowerCAmelCase__ : Optional[Any] = (1, 1_0_0_0) self.assertEqual(outputs.logits.shape ,lowercase_ ) lowerCAmelCase__ : Optional[int] = 
jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,lowercase_ ,atol=1E-4 ) )
106
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A = logging.get_logger(__name__) _A = { 'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json', } class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip_vision_model" def __init__( self , A_=1408 , A_=6144 , A_=39 , A_=16 , A_=224 , A_=14 , A_="gelu" , A_=1E-6 , A_=0.0 , A_=1E-10 , A_=True , **A_ , ) -> Tuple: super().__init__(**A_ ) __UpperCamelCase =hidden_size __UpperCamelCase =intermediate_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =patch_size __UpperCamelCase =image_size __UpperCamelCase =initializer_range __UpperCamelCase =attention_dropout __UpperCamelCase =layer_norm_eps __UpperCamelCase =hidden_act __UpperCamelCase =qkv_bias @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "instructblip_qformer" def __init__( self , A_=30522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , A_=1E-12 , A_=0 , A_="absolute" , A_=2 , A_=1408 , **A_ , ) -> Optional[Any]: super().__init__(pad_token_id=A_ , **A_ ) __UpperCamelCase =vocab_size __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =hidden_act __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =max_position_embeddings __UpperCamelCase =initializer_range __UpperCamelCase =layer_norm_eps __UpperCamelCase =position_embedding_type __UpperCamelCase =cross_attention_frequency __UpperCamelCase =encoder_hidden_size @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip" UpperCAmelCase__ : Optional[Any] = True def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ) -> List[str]: super().__init__(**A_ ) if vision_config is None: __UpperCamelCase ={} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' 
) if qformer_config is None: __UpperCamelCase ={} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __UpperCamelCase ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __UpperCamelCase =InstructBlipVisionConfig(**A_ ) __UpperCamelCase =InstructBlipQFormerConfig(**A_ ) __UpperCamelCase =text_config['model_type'] if 'model_type' in text_config else 'opt' __UpperCamelCase =CONFIG_MAPPING[text_model_type](**A_ ) __UpperCamelCase =self.text_config.tie_word_embeddings __UpperCamelCase =self.text_config.is_encoder_decoder __UpperCamelCase =num_query_tokens __UpperCamelCase =self.vision_config.hidden_size __UpperCamelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __UpperCamelCase =1.0 __UpperCamelCase =0.02 @classmethod def _a ( cls , A_ , A_ , A_ , **A_ , ) -> Optional[Any]: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , ) def _a ( self ) -> Optional[Any]: __UpperCamelCase =copy.deepcopy(self.__dict__ ) __UpperCamelCase =self.vision_config.to_dict() __UpperCamelCase =self.qformer_config.to_dict() __UpperCamelCase =self.text_config.to_dict() __UpperCamelCase =self.__class__.model_type return output
62
0
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = DanceDiffusionPipeline SCREAMING_SNAKE_CASE_ : Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS SCREAMING_SNAKE_CASE_ : List[Any] = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } SCREAMING_SNAKE_CASE_ : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : Dict = False def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict: torch.manual_seed(0 ) a = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowerCamelCase , use_timestep_embedding=__lowerCamelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , ) a = IPNDMScheduler() a = { "unet": unet, "scheduler": scheduler, } return components def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=0 ) -> Optional[Any]: if str(__lowerCamelCase ).startswith("mps" ): a = torch.manual_seed(__lowerCamelCase ) else: a = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) a = { "batch_size": 1, "generator": generator, 
"num_inference_steps": 4, } return inputs def __UpperCAmelCase ( self : Dict ) -> Any: a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = DanceDiffusionPipeline(**__lowerCamelCase ) a = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = self.get_dummy_inputs(__lowerCamelCase ) a = pipe(**__lowerCamelCase ) a = output.audios a = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) a = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: return super().test_save_load_local() @skip_mps def __UpperCAmelCase ( self : str ) -> Tuple: return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def __UpperCAmelCase ( self : List[Any] ) -> List[str]: return super().test_save_load_optional_components() @skip_mps def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: return super().test_attention_slicing_forward_pass() def __UpperCAmelCase ( self : Tuple ) -> List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Dict ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Optional[int] ) -> str: a = torch_device a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ) a = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = torch.manual_seed(0 ) a = pipe(generator=__lowerCamelCase , num_inference_steps=1_00 , audio_length_in_s=4.096 ) a = output.audios a = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] 
) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : Dict ) -> List[Any]: a = torch_device a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa ) a = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = torch.manual_seed(0 ) a = pipe(generator=__lowerCamelCase , num_inference_steps=1_00 , audio_length_in_s=4.096 ) a = output.audios a = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
107
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated _A = collections.namedtuple('_Datasets', ['train', 'validation', 'test']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ _A = 'https://storage.googleapis.com/cvdf-datasets/mnist/' def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0] @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream: __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) if magic != 20_51: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =bytestream.read(rows * cols * num_images ) __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta ) __UpperCamelCase =data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) return data @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.one_hot on tensors.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): __UpperCamelCase =labels_dense.shape[0] __UpperCamelCase =numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes __UpperCamelCase =numpy.zeros((num_labels, num_classes) ) __UpperCamelCase =1 return labels_one_hot @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' 
) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : str=10 ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream: __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) if magic != 20_49: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =bytestream.read(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return labels class UpperCAmelCase__ : """simple docstring""" @deprecated( A_ , 'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' , ) def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ) -> Optional[int]: __UpperCamelCase , __UpperCamelCase =random_seed.get_seed(A_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __UpperCamelCase =dtypes.as_dtype(A_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: __UpperCamelCase =10000 __UpperCamelCase =one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'images.shape: {images.shape} labels.shape: {labels.shape}' __UpperCamelCase =images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __UpperCamelCase =images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__UpperCamelCase =images.astype(numpy.floataa ) __UpperCamelCase =numpy.multiply(A_ , 1.0 / 255.0 ) __UpperCamelCase =images __UpperCamelCase =labels __UpperCamelCase =0 __UpperCamelCase =0 @property def _a ( self ) -> Tuple: return self._images @property def _a ( self ) -> Union[str, Any]: return self._labels @property def _a ( self ) -> Optional[Any]: return self._num_examples @property def _a ( self ) -> List[str]: return self._epochs_completed def _a ( self , A_ , A_=False , A_=True ) -> Optional[Any]: if fake_data: __UpperCamelCase =[1] * 784 __UpperCamelCase =[1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(A_ )], [fake_label for _ in range(A_ )], ) __UpperCamelCase =self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __UpperCamelCase =numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) __UpperCamelCase =self.images[perma] __UpperCamelCase =self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __UpperCamelCase =self._num_examples - start __UpperCamelCase =self._images[start : self._num_examples] __UpperCamelCase =self._labels[start : self._num_examples] # Shuffle the data if shuffle: __UpperCamelCase =numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) __UpperCamelCase =self.images[perm] __UpperCamelCase =self.labels[perm] # Start next epoch __UpperCamelCase =0 __UpperCamelCase =batch_size - rest_num_examples __UpperCamelCase =self._index_in_epoch __UpperCamelCase =self._images[start:end] __UpperCamelCase =self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __UpperCamelCase =self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(SCREAMING_SNAKE_CASE__ , 'Please 
write your own downloading logic.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): if not gfile.Exists(SCREAMING_SNAKE_CASE__ ): gfile.MakeDirs(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not gfile.Exists(SCREAMING_SNAKE_CASE__ ): urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310 with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f: __UpperCamelCase =f.size() print('Successfully downloaded' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'bytes.' ) return filepath @deprecated( SCREAMING_SNAKE_CASE__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=50_00 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =fake() __UpperCamelCase =fake() __UpperCamelCase =fake() return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ ) if not source_url: # empty string check __UpperCamelCase =DEFAULT_SOURCE_URL __UpperCamelCase ='train-images-idx3-ubyte.gz' __UpperCamelCase ='train-labels-idx1-ubyte.gz' __UpperCamelCase ='t10k-images-idx3-ubyte.gz' __UpperCamelCase ='t10k-labels-idx1-ubyte.gz' __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ 
) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ ) if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =( 'Validation size should be between 0 and ' F'{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =train_images[:validation_size] __UpperCamelCase =train_labels[:validation_size] __UpperCamelCase =train_images[validation_size:] __UpperCamelCase =train_labels[validation_size:] __UpperCamelCase ={'dtype': dtype, 'reshape': reshape, 'seed': seed} __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
62
0
"""simple docstring""" from __future__ import annotations def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ): # noqa: E741 '''simple docstring''' while r - l > 1: lowerCAmelCase : str = (l + r) // 2 if v[m] >= key: lowerCAmelCase : Union[str, Any] = m else: lowerCAmelCase : str = m # noqa: E741 return r def a__ ( SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) == 0: return 0 lowerCAmelCase : Any = [0] * len(SCREAMING_SNAKE_CASE ) lowerCAmelCase : List[str] = 1 lowerCAmelCase : Optional[Any] = v[0] for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): if v[i] < tail[0]: lowerCAmelCase : List[Any] = v[i] elif v[i] > tail[length - 1]: lowerCAmelCase : Optional[int] = v[i] length += 1 else: lowerCAmelCase : List[str] = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
108
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase__ ( A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Tuple = TransfoXLTokenizer UpperCAmelCase__ : str = False UpperCAmelCase__ : Tuple = False def _a ( self ) -> Union[str, Any]: super().setUp() __UpperCamelCase =[ '<unk>', '[CLS]', '[SEP]', 'want', 'unwanted', 'wa', 'un', 'running', ',', 'low', 'l', ] __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def _a ( self , **A_ ) -> Optional[int]: __UpperCamelCase =True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase ='<unk> UNwanted , running' __UpperCamelCase ='<unk> unwanted, running' return input_text, output_text def _a ( self ) -> str: __UpperCamelCase =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A_ ) __UpperCamelCase =tokenizer.tokenize('<unk> UNwanted , running' ) self.assertListEqual(A_ , ['<unk>', 'unwanted', ',', 'running'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [0, 4, 8, 7] ) def _a ( self ) -> Any: __UpperCamelCase =TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) def _a ( self ) -> Optional[int]: __UpperCamelCase =TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _a ( self ) -> int: __UpperCamelCase =TransfoXLTokenizer(lower_case=A_ ) __UpperCamelCase ='Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?' 
__UpperCamelCase =[ 'Hello', '(', 'bracket', ')', 'and', 'side', '@-@', 'scrolled', '[', 'and', ']', 'Henry', '\'s', '$', '5', '@,@', '000', 'with', '3', '@.@', '34', 'm', '.', 'What', '\'s', 'up', '!', '?', ] self.assertListEqual(tokenizer.tokenize(A_ ) , A_ ) self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) , A_ ) def _a ( self ) -> Optional[int]: __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =len(A_ ) tokenizer.add_tokens(['new1', 'new2'] ) tokenizer.move_added_token('new1' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(A_ ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('new1' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , 'new1' )
62
0
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A: Optional[Any] = pd.read_csv("sample_data.csv", header=None) A: List[str] = df.shape[:1][0] # If you're using some other dataset input the target column A: Optional[int] = df.iloc[:, 1:2] A: int = actual_data.values.reshape(len_data, 1) A: List[Any] = MinMaxScaler().fit_transform(actual_data) A: Union[str, Any] = 1_0 A: str = 5 A: Union[str, Any] = 2_0 A: Union[str, Any] = len_data - periods * look_back A: Optional[int] = actual_data[:division] A: Tuple = actual_data[division - look_back :] A , A: int = [], [] A , A: Tuple = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A: Optional[int] = np.array(train_x) A: Optional[Any] = np.array(test_x) A: int = np.array([list(i.ravel()) for i in train_y]) A: str = np.array([list(i.ravel()) for i in test_y]) A: Dict = Sequential() model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(6_4, input_shape=(1_2_8, 1))) model.add(Dense(forward_days)) model.compile(loss="mean_squared_error", optimizer="adam") A: int = model.fit( x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4 ) A: List[str] = model.predict(x_test)
109
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _A = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, 
ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _a ( UpperCamelCase__ ): def __init__( self: Tuple , UpperCamelCase_: Callable , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[dict] = None , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Optional[int] , ) -> str: """simple docstring""" super().__init__( features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) lowercase__ = Generator( cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCamelCase_ ( self: Dict ) -> Any: """simple docstring""" if self.streaming: lowercase__ = self.builder.as_streaming_dataset(split='''train''' ) # Build regular (map-style) dataset else: lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) lowercase__ = self.builder.as_dataset( split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset
110
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) _A = logging.getLogger(__name__) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): __UpperCamelCase =np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 ) return np.sum(outputs == labels ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ): with open(SCREAMING_SNAKE_CASE__ , encoding='utf_8' ) as f: __UpperCamelCase =csv.reader(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[] next(SCREAMING_SNAKE_CASE__ ) # skip the first line for line in tqdm(SCREAMING_SNAKE_CASE__ ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ): __UpperCamelCase =[] for dataset in encoded_datasets: __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) __UpperCamelCase =np.zeros((n_batch, 2) , dtype=np.intaa ) __UpperCamelCase =np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa ) __UpperCamelCase =np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] 
__UpperCamelCase =with_conta __UpperCamelCase =with_conta __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1 __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1 __UpperCamelCase =with_conta __UpperCamelCase =with_conta __UpperCamelCase =mc_label __UpperCamelCase =(input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE__ ) for t in all_inputs ) ) return tensor_datasets def _UpperCAmelCase ( ): __UpperCamelCase =argparse.ArgumentParser() parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' ) parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' ) parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 ) parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 ) parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 ) parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 ) parser.add_argument( '--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' 
) , ) parser.add_argument( '--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 ) parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' ) parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' ) parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 ) parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 ) parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=3_74 ) parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' ) __UpperCamelCase =parser.parse_args() print(SCREAMING_SNAKE_CASE__ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) __UpperCamelCase =torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' 
) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __UpperCamelCase =['_start_', '_delimiter_', '_classify_'] __UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) ) model.to(SCREAMING_SNAKE_CASE__ ) # Load and encode the datasets def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : str ): if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return obj return [tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) for o in obj] logger.info('Encoding dataset...' 
) __UpperCamelCase =load_rocstories_dataset(args.train_dataset ) __UpperCamelCase =load_rocstories_dataset(args.eval_dataset ) __UpperCamelCase =(train_dataset, eval_dataset) __UpperCamelCase =tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) # Compute the max input length for the Transformer __UpperCamelCase =model.config.n_positions // 2 - 2 __UpperCamelCase =max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __UpperCamelCase =pre_process_datasets(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ) __UpperCamelCase , __UpperCamelCase =tensor_datasets[0], tensor_datasets[1] __UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.train_batch_size ) __UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =SequentialSampler(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __UpperCamelCase =args.max_steps __UpperCamelCase =args.max_steps // (len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps) + 1 else: __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps * args.num_train_epochs __UpperCamelCase =list(model.named_parameters() ) __UpperCamelCase =['bias', 'LayerNorm.bias', 'LayerNorm.weight'] __UpperCamelCase =[ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if 
any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] __UpperCamelCase =AdamW(SCREAMING_SNAKE_CASE__ , lr=args.learning_rate , eps=args.adam_epsilon ) __UpperCamelCase =get_linear_schedule_with_warmup( SCREAMING_SNAKE_CASE__ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE__ ) if args.do_train: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ): __UpperCamelCase =0 __UpperCamelCase =0 __UpperCamelCase =tqdm(SCREAMING_SNAKE_CASE__ , desc='Training' ) for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch __UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __UpperCamelCase =( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __UpperCamelCase ='Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE__ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __UpperCamelCase =model.module if hasattr(SCREAMING_SNAKE_CASE__ , 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ ) torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE__ ) model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE__ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned 
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(SCREAMING_SNAKE_CASE__ ) if args.do_eval: model.eval() __UpperCamelCase , __UpperCamelCase =0, 0 __UpperCamelCase , __UpperCamelCase =0, 0 for batch in tqdm(SCREAMING_SNAKE_CASE__ , desc='Evaluating' ): __UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch with torch.no_grad(): __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =model( SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =mc_logits.detach().cpu().numpy() __UpperCamelCase =mc_labels.to('cpu' ).numpy() __UpperCamelCase =accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __UpperCamelCase =eval_loss / nb_eval_steps __UpperCamelCase =eval_accuracy / nb_eval_examples __UpperCamelCase =tr_loss / nb_tr_steps if args.do_train else None __UpperCamelCase ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} __UpperCamelCase =os.path.join(args.output_dir , 'eval_results.txt' ) with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
62
0
'''simple docstring''' from sklearn.metrics import fa_score import datasets a__ : Optional[Any] ='''\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n''' a__ : Tuple ='''\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n''' a__ : int ='''\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def _lowerCamelCase ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , ) def _lowerCamelCase ( self : Optional[int] , __A : str , __A : Optional[int] , __A : Any=None , __A : Union[str, Any]=1 , __A : Any="binary" , __A : Union[str, Any]=None ): __UpperCamelCase = fa_score( A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ ) return {"f1": float(A_ ) if score.size == 1 else score}
53
def solution(min_total: int = 10**12) -> int:
    """Return the number of blue discs in the first arrangement whose total
    disc count exceeds ``min_total`` and where P(two blue) is exactly 1/2
    (Project Euler 100).

    Fix: the function was defined under the obfuscated name ``_UpperCAmelCase``
    but invoked below as ``solution()`` — a NameError at runtime.

    The (blue, total) solutions follow a Pell-like recurrence; ``numerator`` /
    ``denominator`` track successive convergents, so the loop runs only
    O(log min_total) times.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    # Advance to the first convergent whose implied total exceeds min_total.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    # Blue-disc count for that arrangement.
    return (denominator + 1) // 2


# Backward-compatible alias for the previous (obfuscated) public name.
_UpperCAmelCase = solution


if __name__ == "__main__":
    print(f"{solution() = }")
62
0
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Return the set of vertices reachable from ``start`` using iterative DFS.

    Fix: the function was defined under the obfuscated name
    ``__SCREAMING_SNAKE_CASE`` yet called below as ``depth_first_search``, and
    the example graph was bound to ``__lowerCamelCase`` yet used as ``G`` —
    both NameErrors at runtime.

    Args:
        graph: Adjacency-list mapping vertex -> list of neighbours.
        start: Vertex to start the traversal from.
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
219
# Lazy-import scaffolding for the ViT-MAE model family.
#
# Fixes: the import-structure dict was assigned to `_A` and then *overwritten*
# by the torch class list, and the final `_LazyModule(...)` call referenced
# `_import_structure`, which was never defined (NameError). The dict is now
# built under `_import_structure` and the lazy module is also installed in
# `sys.modules` (the `import sys` was otherwise dead code).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Submodule name -> public names it exports; consumed by _LazyModule so heavy
# backends are imported only on first attribute access.
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # PyTorch missing: skip registering the torch model classes
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # TensorFlow missing: skip registering the TF model classes
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    # Swap this module for a lazy proxy; keep `_A` for backward compatibility.
    _A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _A
62
0
"""Maximum top-to-bottom path sum of a triangle of numbers (Project Euler 18/67):
each step moves to an adjacent number on the row below."""
import os


def solution(path=None):
    """Return the maximum path sum of the triangle stored at ``path``.

    Fixes: the function was defined under the obfuscated name ``UpperCamelCase``
    but invoked below as ``solution()`` (NameError), and
    ``os.path.realpath(SCREAMING_SNAKE_CASE__)`` referenced an undefined name
    (should be ``__file__``).

    Args:
        path: Optional path to a whitespace-separated triangle file. Defaults
            to ``triangle.txt`` next to this script, preserving the original
            zero-argument call.

    Returns:
        int: the largest achievable top-to-bottom sum.
    """
    if path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(script_dir, "triangle.txt")

    with open(path) as in_file:
        triangle = [[int(number) for number in line.strip().split(" ")] for line in in_file]

    # In-place DP: each entry becomes the best sum of any path reaching it.
    for i in range(1, len(triangle)):
        for j in range(len(triangle[i])):
            # Out-of-range parents contribute 0 (triangle edges).
            above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
            above_left = triangle[i - 1][j - 1] if j > 0 else 0
            triangle[i][j] += max(above_right, above_left)
    return max(triangle[-1])


if __name__ == "__main__":
    print(solution())
151
# Lazy-import scaffolding for the Jukebox model family.
#
# Fixes: the import-structure dict was assigned to `_A`, but the final
# `_LazyModule(...)` call referenced `_import_structure`, which was never
# defined (NameError). The dict is now built under `_import_structure` and the
# lazy module is also installed in `sys.modules` (otherwise `import sys` was
# dead code).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # PyTorch missing: skip registering the torch model classes
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Swap this module for a lazy proxy; keep `_A` for backward compatibility.
    _A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _A
62
0
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. 
# New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _A : str = 16 _A : Dict = 32 def UpperCamelCase_ ( snake_case_ : Accelerator , snake_case_ : DatasetDict , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int = 16 ) -> Union[str, Any]: '''simple docstring''' __lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __lowerCAmelCase = DatasetDict( { """train""": dataset["""train"""].select(SCREAMING_SNAKE_CASE__ ), """validation""": dataset["""train"""].select(SCREAMING_SNAKE_CASE__ ), """test""": dataset["""validation"""], } ) def tokenize_function(snake_case_ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCAmelCase = datasets.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case_ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. 
__lowerCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCAmelCase = 16 elif accelerator.mixed_precision != "no": __lowerCAmelCase = 8 else: __lowerCAmelCase = None return tokenizer.pad( SCREAMING_SNAKE_CASE__ , padding="""longest""" , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" , ) # Instantiate dataloaders. __lowerCAmelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = DataLoader( tokenized_datasets["""test"""] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader, test_dataloader def UpperCamelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Dict ) -> str: '''simple docstring''' __lowerCAmelCase = [] # Download the dataset __lowerCAmelCase = load_dataset("""glue""" , """mrpc""" ) # Create our splits __lowerCAmelCase = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator __lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config["""lr"""] __lowerCAmelCase = int(config["""num_epochs"""] ) __lowerCAmelCase = int(config["""seed"""] ) __lowerCAmelCase = int(config["""batch_size"""] ) __lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation __lowerCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __lowerCAmelCase 
= batch_size // MAX_GPU_BATCH_SIZE __lowerCAmelCase = MAX_GPU_BATCH_SIZE set_seed(SCREAMING_SNAKE_CASE__ ) # New Code # # Create our folds: __lowerCAmelCase = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) __lowerCAmelCase = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(SCREAMING_SNAKE_CASE__ ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_fold_dataloaders( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=SCREAMING_SNAKE_CASE__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ ) # Instantiate scheduler __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = outputs.loss __lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = outputs.logits.argmax(dim=-1 ) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , ) __lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE__ ) # New Code # # We also run predictions on the test set at the very end __lowerCAmelCase = [] for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = outputs.logits __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: __lowerCAmelCase = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 ) __lowerCAmelCase = torch.stack(SCREAMING_SNAKE_CASE__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) __lowerCAmelCase = metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) accelerator.print("""Average test metrics from all folds:""" , SCREAMING_SNAKE_CASE__ ) def UpperCamelCase_ ( ) -> Optional[Any]: '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""The number of splits to perform across the dataset""" ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
229
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Submodule name -> public symbols it provides; consumed by _LazyModule below.
# FIX: the original bound this dict to `_A` but then passed the *undefined*
# name `_import_structure` to _LazyModule, raising NameError at import time.
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer import only
    # happens on first attribute access (standard transformers lazy-init
    # pattern).  FIX: the original assigned the proxy to `_A`, which left the
    # real module in sys.modules and defeated the lazy loading entirely.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
'''Task template for language-modeling datasets (single text column).'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


# FIX: the original wrote `@dataclass(frozen=A_)` and `class A_(A_)`, i.e. the
# class referenced itself before it existed -> NameError at import.  The base
# must be the imported TaskTemplate and templates are frozen (immutable).
@dataclass(frozen=True)
class A_(TaskTemplate):
    """Describes a language-modeling task over a single text column.

    Field names follow the ``TaskTemplate`` contract (``task``,
    ``input_schema``, ``label_schema``, ``text_column``, ``column_mapping``).
    The original collapsed all four fields to the single name
    ``_lowerCamelCase``, so the earlier ones silently overwrote each other and
    the mapping property read a nonexistent attribute.
    """

    # Canonical task name; kept in asdict() output even when left at default.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Map the dataset's actual text column onto the canonical "text" key.
        return {self.text_column: "text"}
22
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural type for audio filters: anything with a per-sample process()."""

    def process(self, sample: float) -> float:
        """Calculate y[n] from input sample x[n]; the default is the zero filter."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return (lowest, highest) magnitude bounds over the positive-frequency
    bins (DC and Nyquist excluded), widened to at least [-20, 20] so the plot
    always has a readable y-range."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's magnitude response in dB, derived from its impulse
    response (FFT of the filtered unit impulse)."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding out to `samplerate` samples
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    # FIX: the original called the nonexistent `np.logaa` (mangled `np.log10`).
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 Hz up to the Nyquist frequency.
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    # Display within reasonable bounds.
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase response in radians, derived from its impulse
    response (angle of the FFT of the filtered unit impulse)."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding out to `samplerate` samples
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 Hz up to the Nyquist frequency.
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    # Unwrap so the phase is continuous instead of jumping at +/- pi.
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
62
0
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class lowerCamelCase__( unittest.TestCase): UpperCAmelCase__ : Tuple = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase__ : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: UpperCAmelCase__ : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: UpperCAmelCase__ : List[str] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: str ): __lowerCamelCase = ZeroShotClassificationPipeline( model=A_ , tokenizer=A_ , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def lowerCAmelCase__ ( self: str , UpperCamelCase_: Dict , UpperCamelCase_: str ): __lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} ) # No kwarg __lowerCamelCase = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} ) __lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} ) 
__lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) __lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) __lowerCamelCase = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} ) # https://github.com/huggingface/transformers/issues/13846 __lowerCamelCase = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A_ , [ {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]} for i in range(1 ) ] , ) __lowerCamelCase = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A_ , [ {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]} for i in range(2 ) ] , ) with self.assertRaises(A_ ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A_ ): classifier(A_ , candidate_labels="""politics""" ) with self.assertRaises(A_ ): classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A_ ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A_ ) with self.assertRaises(A_ ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with 
self.assertRaises(A_ ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A_ , ) self.run_entailment_id(A_ ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[Any] ): __lowerCamelCase = zero_shot_classifier.model.config __lowerCamelCase = config.labelaid __lowerCamelCase = zero_shot_classifier.entailment_id __lowerCamelCase = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) __lowerCamelCase = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) __lowerCamelCase = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) __lowerCamelCase = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) __lowerCamelCase = original_labelaid self.assertEqual(A_ , zero_shot_classifier.entailment_id ) @require_torch def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. 
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 1_00 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) __lowerCamelCase = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A_ ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.333, 0.333, 0.333], } , ) @require_tf def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) __lowerCamelCase = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A_ ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.333, 0.333, 0.333], } , ) @slow @require_torch def lowerCAmelCase__ ( self: int ): __lowerCamelCase = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) __lowerCamelCase = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A_ ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.976, 0.015, 0.009], } , ) __lowerCamelCase = zero_shot_classifier( """The dominant sequence transduction models are based on complex 
recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A_ , ) self.assertEqual( nested_simplify(A_ ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.817, 0.713, 0.018, 0.018], } , ) @slow @require_tf def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) __lowerCamelCase = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A_ ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.976, 0.015, 0.009], } , ) __lowerCamelCase = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. 
Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A_ , ) self.assertEqual( nested_simplify(A_ ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.817, 0.713, 0.018, 0.018], } , )
12
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols.
# FIX: the original bound this dict to `_A`, then *rebound* `_A` to the plain
# symbol list in the torch branch (losing the configuration entry), and
# finally passed the undefined name `_import_structure` to _LazyModule,
# raising NameError at import time.
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exported when torch is installed.
    _import_structure['modeling_sew'] = [
        'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SEWForCTC',
        'SEWForSequenceClassification',
        'SEWModel',
        'SEWPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy (standard transformers pattern);
    # the original assigned the proxy to `_A`, which had no effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase : Optional[int] = """\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... 
).images\n >>> image[0].save(\"cat.png\")\n ```\n""" def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any]=8 ): """simple docstring""" a__ : List[str] =height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a__ : Any =width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class __lowerCAmelCase ( A_): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> int: '''simple docstring''' super().__init__() self.register_modules( unet=A_ , scheduler=A_ , movq=A_ , ) a__ : List[str] =2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' if latents is None: a__ : List[str] =randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) a__ : Union[str, Any] =latents.to(A_ ) a__ : int =latents * scheduler.init_noise_sigma return latents def _lowercase ( self , lowerCAmelCase__=0 ) -> List[str]: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) a__ : Optional[int] =torch.device(F'''cuda:{gpu_id}''' ) a__ : Any =[ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A_ , A_ ) def _lowercase ( self , lowerCAmelCase__=0 ) -> Tuple: '''simple docstring''' if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) a__ : Optional[int] =torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=A_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a__ : Dict =None for cpu_offloaded_model in [self.unet, self.movq]: a__ , a__ : Optional[Any] =cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ ) # We'll offload the last model manually. a__ : Any =hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase ( self ) -> Dict: '''simple docstring''' if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(A_ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(A_ ) def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 5_1_2 , lowerCAmelCase__ = 5_1_2 , lowerCAmelCase__ = 1_0_0 , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Dict: '''simple docstring''' a__ : Union[str, Any] =self._execution_device a__ : Optional[Any] =guidance_scale > 1.0 if isinstance(A_ , A_ ): a__ : Optional[int] =torch.cat(A_ , dim=0 ) a__ : Optional[Any] =image_embeds.shape[0] * num_images_per_prompt if isinstance(A_ , A_ ): a__ : Optional[int] =torch.cat(A_ , dim=0 ) if do_classifier_free_guidance: a__ : Any =image_embeds.repeat_interleave(A_ , dim=0 ) a__ : Union[str, Any] =negative_image_embeds.repeat_interleave(A_ , dim=0 ) a__ : Tuple =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ ) self.scheduler.set_timesteps(A_ , device=A_ ) a__ : Union[str, Any] =self.scheduler.timesteps a__ : str =self.unet.config.in_channels 
a__ , a__ : Union[str, Any] =downscale_height_and_width(A_ , A_ , self.movq_scale_factor ) # create initial latent a__ : Optional[Any] =self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(A_ ) ): # expand the latents if we are doing classifier free guidance a__ : Optional[Any] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a__ : List[str] ={"image_embeds": image_embeds} a__ : List[str] =self.unet( sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0] if do_classifier_free_guidance: a__ , a__ : Union[str, Any] =noise_pred.split(latents.shape[1] , dim=1 ) a__ , a__ : Optional[int] =noise_pred.chunk(2 ) a__ , a__ : Dict =variance_pred.chunk(2 ) a__ : Union[str, Any] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a__ : Tuple =torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a__ , a__ : List[str] =noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a__ : List[Any] =self.scheduler.step( A_ , A_ , A_ , generator=A_ , )[0] # post-processing a__ : Dict =self.movq.decode(A_ , force_not_quantize=A_ )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a__ : Tuple =image * 0.5 + 0.5 a__ : Optional[int] =image.clamp(0 , 1 ) a__ : str =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a__ : Optional[Any] =self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
95
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# FIX: the original rebound the single name `_A` to both the logger and the
# archive map, clobbering the logger.
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}


class MvpConfig(PretrainedConfig):
    """Configuration for MVP (a BART-style encoder-decoder) models.

    FIX: the original inherited from the undefined name ``A_`` and named every
    ``__init__`` parameter ``A_`` (duplicate parameter names -> SyntaxError).
    The class-level names below (``model_type``, ``keys_to_ignore_at_inference``,
    ``attribute_map``) are part of the ``PretrainedConfig`` contract and must
    keep exactly these identifiers.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        # Parameter order and defaults restored from the obfuscated signature;
        # attribute names restored from the visible assignment targets.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy flag migration; FIX: the original passed the undefined name
        # `A_` as the kwargs.get default — it must be False.
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
62
0
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """Configuration for UniSpeech models.

    FIX: the original inherited from the undefined name ``A_`` and gave every
    ``__init__`` parameter the same name ``__lowerCAmelCase`` (duplicate
    parameter names -> SyntaxError).  Parameter names were restored from the
    assignment targets visible in the body; defaults were kept exactly as in
    the obfuscated signature.  ``model_type`` is part of the
    ``PretrainedConfig`` contract.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy to lists so the config owns mutable, JSON-serializable values.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
209
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = GPTaTokenizer UpperCAmelCase__ : Any = GPTaTokenizerFast UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : int = {"add_prefix_space": True} UpperCAmelCase__ : Any = False def _a ( self ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase =[ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) ) __UpperCamelCase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase ={'unk_token': '<unk>'} __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _a ( self , **A_ ) -> str: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , **A_ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase ='lower newer' __UpperCamelCase ='lower newer' return input_text, output_text def _a ( self ) -> List[Any]: __UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) 
__UpperCamelCase ='lower newer' __UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) self.assertListEqual(A_ , A_ ) __UpperCamelCase =tokens + [tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self ) -> int: if not self.test_rust_tokenizer: return __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase ='lower newer' # Testing tokenization __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids without special tokens __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids with special tokens __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # Testing the unknown token __UpperCamelCase =tokens + [rust_tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self , *A_ , **A_ ) -> Optional[int]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def _a ( self , A_=15 ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This 
is a simple input 2'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) def _a ( self ) -> int: __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __UpperCamelCase =tokenizer.pad_token_id __UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) __UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding 
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase ='$$$' __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ ) __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This is a simple input 2'] __UpperCamelCase =tokenizer.bos_token_id __UpperCamelCase =tokenizer(A_ ) __UpperCamelCase =tokenizer(A_ ) self.assertEqual(out_s.input_ids[0] , A_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __UpperCamelCase =tokenizer.decode(out_s.input_ids ) __UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def _a ( self ) -> Optional[int]: pass def _a ( self ) -> Any: # TODO: change to self.get_tokenizers() when the fast version is implemented __UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): __UpperCamelCase ='Encode this.' 
__UpperCamelCase ='This one too please.' __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ ) encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ ) __UpperCamelCase =tokenizer.encode_plus( A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , ) __UpperCamelCase =encoded_sequence_dict['input_ids'] __UpperCamelCase =encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(A_ ) , len(A_ ) ) __UpperCamelCase =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ ) ] __UpperCamelCase =[x for x in filtered_sequence if x is not None] self.assertEqual(A_ , A_ ) @require_tokenizers class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> Optional[Any]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('test_opt' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) def _a ( self ) -> Dict: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # Same as above self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def _a ( self ) -> List[Any]: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='bos' __UpperCamelCase =tokenizer.get_vocab()['bos'] __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # We 
changed the bos token self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('./tok' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
62
0
import requests

# Placeholder default key — replace with a real Giphy API key before use.
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return the URLs of GIFs matching *query* from the Giphy search API.

    Args:
        query: free-text search terms (whitespace separated).
        api_key: Giphy API key; defaults to the module-level placeholder.

    Returns:
        A list of GIF URL strings from the response's "data" field.

    Raises:
        requests.RequestException: on network failure.
        KeyError: if the JSON payload has no "data" field.
    """
    # Giphy expects '+'-separated search terms in the q parameter.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
117
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ = None ) -> None: if components is None: __UpperCamelCase =[] __UpperCamelCase =list(A_ ) def __len__( self ) -> int: return len(self.__components ) def __str__( self ) -> str: return "(" + ",".join(map(A_ , self.__components ) ) + ")" def __add__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: raise Exception('must have the same size' ) def __sub__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self , A_ ) -> Vector: ... @overload def __mul__( self , A_ ) -> float: ... def __mul__( self , A_ ) -> float | Vector: if isinstance(A_ , (float, int) ): __UpperCamelCase =[c * other for c in self.__components] return Vector(A_ ) elif isinstance(A_ , A_ ) and len(self ) == len(A_ ): __UpperCamelCase =len(self ) __UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )] return sum(A_ ) else: # error case raise Exception('invalid operand!' 
) def _a ( self ) -> Vector: return Vector(self.__components ) def _a ( self , A_ ) -> float: if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def _a ( self , A_ , A_ ) -> None: assert -len(self.__components ) <= pos < len(self.__components ) __UpperCamelCase =value def _a ( self ) -> float: if len(self.__components ) == 0: raise Exception('Vector is empty' ) __UpperCamelCase =[c**2 for c in self.__components] return math.sqrt(sum(A_ ) ) def _a ( self , A_ , A_ = False ) -> float: __UpperCamelCase =self * other __UpperCamelCase =self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return Vector([0] * dimension ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) __UpperCamelCase =[0] * dimension __UpperCamelCase =1 return Vector(SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ): assert ( isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) )) ) return x * scalar + y def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] return Vector(SCREAMING_SNAKE_CASE__ ) class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ , A_ , A_ ) -> None: 
__UpperCamelCase =matrix __UpperCamelCase =w __UpperCamelCase =h def __str__( self ) -> str: __UpperCamelCase ='' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] + other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrix must have the same dimension!' ) def __sub__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] - other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self , A_ ) -> Matrix: ... @overload def __mul__( self , A_ ) -> Vector: ... def __mul__( self , A_ ) -> Vector | Matrix: if isinstance(A_ , A_ ): # matrix-vector if len(A_ ) == self.__width: __UpperCamelCase =zero_vector(self.__height ) for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] * other.component(A_ ) for j in range(self.__width ) ] ans.change_component(A_ , sum(A_ ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' 
) elif isinstance(A_ , (int, float) ): # matrix-scalar __UpperCamelCase =[ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(A_ , self.__width , self.__height ) return None def _a ( self ) -> int: return self.__height def _a ( self ) -> int: return self.__width def _a ( self , A_ , A_ ) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ , A_ ) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: __UpperCamelCase =value else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) __UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(A_ ) ): __UpperCamelCase =minor[i][:y] + minor[i][y + 1 :] return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant() def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(A_ , A_ ) else: raise Exception('Indices out of bounds' ) def _a ( self ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __UpperCamelCase =[ self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width ) ] return sum(A_ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , 
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[ [random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ ) ] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
62
0
def merge_sort(collection: list) -> list:
    """Sort *collection* in ascending order by repeatedly extracting the
    current minimum and maximum.

    NOTE: despite the historical name this is a min/max "exchange" sort,
    not a classic merge sort; the name is kept because the CLI below
    calls it as ``merge_sort``. The input list is consumed (mutated).
    """
    start, end = [], []
    while len(collection) > 1:
        smallest, largest = min(collection), max(collection)
        start.append(smallest)
        end.append(largest)
        # list.remove drops one occurrence each, so duplicate values are
        # peeled off one pair per iteration.
        collection.remove(smallest)
        collection.remove(largest)
    end.reverse()
    # At most one element (the median, for odd lengths) remains.
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
43
# Two small digraphs for manual testing.
# test_graph_1 has SCCs {0,1,2}, {3}, {4}; test_graph_2 has {0,1,2}, {3,4,5}.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(
    graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """Return vertices reachable from *vert* in DFS post-order.

    *visited* is shared across calls and updated in place.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    # Post-order: append the vertex only after all its descendants.
    order.append(vert)
    return order


def find_components(
    reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """Collect the unvisited vertices reachable from *vert* in the
    reversed graph — exactly one strongly connected component.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of *graph* using
    Kosaraju's algorithm: DFS post-order on the graph, then DFS on the
    reversed graph in reverse post-order.

    Vertices must be numbered 0 .. len(graph) - 1.
    """
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    # Build the transpose graph (all edges reversed).
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    # Walk the post-order backwards; each unvisited vertex seeds one SCC.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
62
0
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely.

    The first yielded value is F(2) = 1 (the sequence's second 1).
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the 1-based index (F(1) = F(2) = 1) of the first Fibonacci
    number with *n* decimal digits.

    Project Euler problem 25 asks for n = 1000.
    """
    # The generator starts at F(2), so after counting every term shorter
    # than n digits, the first n-digit term has index answer + 1.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
130
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = '▁' _A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} _A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } _A = {'vinai/bartpho-syllable': 1024} class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Any = VOCAB_FILES_NAMES UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : str = ["input_ids", "attention_mask"] def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it __UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token __UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __UpperCamelCase =vocab_file __UpperCamelCase =monolingual_vocab_file __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility __UpperCamelCase ={} __UpperCamelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(A_ ) not in self.fairseq_tokens_to_ids: __UpperCamelCase =cnt cnt += 1 with open(A_ , 'r' , encoding='utf-8' ) as f: for line in f.readlines(): __UpperCamelCase =line.strip().split()[0] __UpperCamelCase =len(self.fairseq_tokens_to_ids ) if str(A_ ) not in self.fairseq_tokens_to_ids: __UpperCamelCase =len(self.fairseq_tokens_to_ids ) __UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Any: __UpperCamelCase =self.__dict__.copy() __UpperCamelCase =None __UpperCamelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , A_ ) -> List[str]: __UpperCamelCase =d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase ={} __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _a ( self , A_ , A_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCamelCase =[self.cls_token_id] __UpperCamelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]: if already_has_special_tokens: return 
super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _a ( self , A_ , A_ = None ) -> List[int]: __UpperCamelCase =[self.sep_token_id] __UpperCamelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _a ( self ) -> Any: return len(self.fairseq_ids_to_tokens ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self , A_ ) -> List[str]: return self.sp_model.encode(A_ , out_type=A_ ) def _a ( self , A_ ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _a ( self , A_ ) -> int: return self.fairseq_ids_to_tokens[index] def _a ( self , A_ ) -> List[Any]: __UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip() return out_string def _a ( self , A_ , A_ = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCamelCase =os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , 'wb' ) as fi: __UpperCamelCase =self.sp_model.serialized_model_proto() fi.write(A_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( A_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , A_ ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(A_ , 'w' , encoding='utf-8' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(A_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
62
0
"""Greatest-common-divisor demo: recursive Euclid vs. iterative Euclid."""


def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) via the recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) via the iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers and print both gcd variants."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
53
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Evaluate the Gaussian (normal) probability density function at *x*.

    Args:
        x: scalar or numpy array of evaluation points.
        mu: mean of the distribution (default 0.0).
        sigma: standard deviation (default 1.0; must be non-zero).

    Returns:
        The pdf value(s), same shape as *x*.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Maps submodule name -> public symbols it defines; consumed by _LazyModule.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    # Static type checkers see the real import.
    # NOTE(review): this module path ("tokenization_wavaveca_phoneme") does not
    # match the _import_structure key above — confirm the actual filename on disk.
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Replace this package module with a lazy proxy so submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
219
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Image processor for CLIP-style models.

    Pipeline: optional RGB conversion -> resize (shortest edge) -> center crop
    -> rescale -> normalize -> channel-dimension formatting. Each step can be
    toggled/configured at construction time and overridden per `preprocess` call.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Resize targets the shortest edge (non-square), crop is a fixed square.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: `(image - mean) / std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full preprocessing pipeline on one image or a batch.

        Per-call arguments override the processor's construction-time defaults.
        Returns a `BatchFeature` whose "pixel_values" holds the processed images.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
62
0
"""HumanEval-style code evaluation metric (pass@k over untrusted generated code)."""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness

_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"

_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"

_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n"

_WARNING = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"

_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    """Runs model-generated code candidates against unit tests and reports pass@k.

    SECURITY: executes untrusted code; gated behind HF_ALLOW_CODE_EVAL="1" and
    meant to be run inside a sandbox (see _WARNING).
    """

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Evaluate every candidate against its test and aggregate pass@k.

        Returns:
            (pass_at_k, results): pass rates per k, plus per-candidate raw results.
        """
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            # Refuse to run untrusted code unless the user explicitly opted in.
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    # Candidate program = generated code followed by its unit test.
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()  # order by completion_id for determinism
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        # pass@k is only defined for tasks with at least k samples.
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k of each problem; returns a numpy array of estimates.

    Uses the unbiased estimator from Chen et al. (2021):
    1 - C(n - c, k) / C(n, k), computed stably as a product.
    """

    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        # One sample count shared by every problem.
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
151
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration for a YOLOS model (ViT backbone + DETR-style detection head).

    Stores architecture hyperparameters, Hungarian-matcher costs and loss
    coefficients. Defaults follow the yolos-base architecture.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch embedding / detection tokens.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the single pixel_values input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
62
0
"""Convert an original YOSO checkpoint to the transformers YosoForMaskedLM format."""
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map one original YOSO state-dict key to its transformers equivalent.

    The replacements are order-sensitive: more specific substrings (e.g. "norm1",
    "mha.attn", "ff1") must be handled before their generic prefixes.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        # "transformer_3.<...>" -> "encoder.layer.3.<...>"
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        # Everything outside the MLM head lives under the "yoso." backbone prefix.
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys in-place and add the tensors transformers expects."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            # Pooler / sentence-classification weights are not used by YosoForMaskedLM.
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    # Tie the prediction bias to the decoder bias, as transformers expects.
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # Position ids are offset by 2 (padding convention of the original model).
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original checkpoint, convert it, and save a transformers model."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
229
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

# Image processor requires vision deps (PIL); register it only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

# Modeling code requires torch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
"""KL-regularized variational autoencoder (AutoencoderKL) with sliced/tiled codecs."""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of `AutoencoderKL.encode`.

    `latent_dist` is a diagonal Gaussian over the latents; draw from it with
    `.sample()` or take `.mode()`.
    """

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with KL loss, as used by latent diffusion models.

    Supports sliced encoding/decoding (process the batch one sample at a time)
    and tiled encoding/decoding (process large images in overlapping tiles that
    are blended together) to bound peak memory.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,  # encoder emits mean and (log-)variance channels
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        # 1x1 convs mapping to/from the distribution-parameter space.
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        # Latent-side tile size = pixel tile size shrunk by the total downsampling factor.
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        # Encoder/Decoder own the checkpointable sub-blocks.
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable (or disable) tiled encode/decode for very large images."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled encode/decode."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Process batched inputs one sample at a time to save memory."""
        self.use_slicing = True

    def disable_slicing(self):
        """Process batched inputs in a single forward pass."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors keyed by their module path."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set one processor for all attention layers, or a dict keyed by module path."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Restore the default attention processor everywhere."""
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        """Encode images to a latent distribution (tiled/sliced when enabled)."""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents to images (sliced per-sample when enabled)."""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent: int):
        """Linearly blend the bottom rows of tile `a` into the top rows of tile `b`."""
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent: int):
        """Linearly blend the right columns of tile `a` into the left columns of tile `b`."""
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        """Encode in overlapping tiles and blend the seams together."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode in overlapping latent tiles and blend the seams together."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Encode then decode `sample`; optionally sample the posterior instead of its mode."""
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
22
from __future__ import annotations import math class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ ) -> None: __UpperCamelCase =size # approximate the overall size of segment tree with given value __UpperCamelCase =[0 for i in range(0 , 4 * size )] # create array to store lazy update __UpperCamelCase =[0 for i in range(0 , 4 * size )] __UpperCamelCase =[0 for i in range(0 , 4 * size )] # flag for lazy update def _a ( self , A_ ) -> int: return idx * 2 def _a ( self , A_ ) -> int: return idx * 2 + 1 def _a ( self , A_ , A_ , A_ , A_ ) -> None: if left_element == right_element: __UpperCamelCase =a[left_element - 1] else: __UpperCamelCase =(left_element + right_element) // 2 self.build(self.left(A_ ) , A_ , A_ , A_ ) self.build(self.right(A_ ) , mid + 1 , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> bool: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __UpperCamelCase =val if left_element != right_element: __UpperCamelCase =val __UpperCamelCase =val __UpperCamelCase =True __UpperCamelCase =True return True __UpperCamelCase =(left_element + right_element) // 2 self.update(self.left(A_ ) , A_ , A_ , A_ , A_ , A_ ) self.update(self.right(A_ ) , mid + 1 , A_ , A_ , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) return True def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int | float: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if 
right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __UpperCamelCase =(left_element + right_element) // 2 __UpperCamelCase =self.query(self.left(A_ ) , A_ , A_ , A_ , A_ ) __UpperCamelCase =self.query(self.right(A_ ) , mid + 1 , A_ , A_ , A_ ) return max(A_ , A_ ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A_ , A_ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _A = 15 _A = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
62
0
# Public exports for the Kandinsky diffusion pipeline package.
# Follows the diffusers optional-dependency pattern: if torch and
# transformers are both installed the real pipelines are exported;
# otherwise import-compatible dummy objects are exposed so that
# `from ... import KandinskyPipeline` still succeeds and raises a
# helpful error only when the object is actually used.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Probe for the optional backends; raising routes us to the dummy branch.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Backends missing: expose placeholder objects instead of the pipelines.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    # Backends available: export the real implementations.
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
12
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ): __UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250' __UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' ) __UpperCamelCase =soup.find_all('td' , attrs='titleColumn' ) __UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) } def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ): __UpperCamelCase =get_imdb_top_aaa_movies() with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file: __UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ ) writer.writerow(['Movie title', 'IMDb rating'] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
0
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    # NOTE(review): restored from upstream — the constant "0.12" was assigned to
    # an unused variable in the mangled source; it is the XLA memory fraction.
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 numpy tensor of `shape` with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    """Random 0/1 attention mask; the last token of each row is always attended."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    """Mixin exercising Flax `generate()` across greedy/sample/beam modes.

    Concrete test classes must provide `model_tester` and
    `all_generative_model_classes`.
    """

    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            # jit-compiled generation must match the eager path exactly
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
95
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder (a ViT variant)."""

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (BERT-like cross-attention model)."""

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying together vision, Q-Former and language model configs."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Instantiate from the three sub-model configurations."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
62
0
# Lazy-import module for XLM-RoBERTa. Fixed: the mangled source assigned every
# import-structure list to a throwaway `_a`, so `_import_structure` was never
# populated and the final `_LazyModule(...)` call raised NameError.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports sub-modules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
209
"""Deprecated helpers for downloading and reading MNIST data.

Restored consistent names: the mangled source assigned every definition to
`_UpperCAmelCase`/`_A` while call sites used the real names, so nothing
resolved at runtime.
"""
import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read a big-endian uint32 from the stream (MNIST header fields)."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract images into a 4-D uint8 array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # NOTE(review): the `= 1` target was lost in the mangled source; restored
    # from upstream as the flat-index one-hot scatter — confirm against history.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels into a 1-D uint8 array [index] (or one-hot matrix)."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for one MNIST split with epoch-aware mini-batch iteration."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` into `work_directory` unless it already exists."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST train/validation/test splits."""
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
62
0
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function snake_case__ : str = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s snake_case__ : Union[str, Any] = 3e8 # unit of c : m * s^-1 def _a ( lowerCamelCase: float , lowerCamelCase: float , lowerCamelCase: float ) -> Optional[int]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if force < 0: raise ValueError('''Magnitude of force can not be negative''' ) if distance < 0: raise ValueError('''Distance can not be negative''' ) if area < 0: raise ValueError('''Area can not be negative''' ) if force == 0: __A = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_40 * (distance) ** 4 ) return {"force": force} elif area == 0: __A = (2_40 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __A = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('''One and only one argument must be 0''' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
117
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class UpperCAmelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the word-level TransfoXL tokenizer.

    The original had every method named ``_a`` (later defs shadowed earlier
    ones, so unittest could only see one) and the mixin's class attributes
    renamed; both are restored here so discovery and the mixin contract work.
    """

    # NOTE(review): attribute names follow the TokenizerTesterMixin contract —
    # confirm against tests/test_tokenization_common.py.
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        # Write a tiny vocab file the tokenizer under test will load.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join(x + "\n" for x in vocab_tokens))

    def get_tokenizer(self, **kwargs):
        # Tests assume a lower-casing tokenizer unless overridden.
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        # Detokenization must round-trip the Moses-style @-@ / @,@ / @.@ markers.
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
62
0
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in so references to ``Image`` below resolve without PIL."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    """Return the md5 hex digest of an image's raw bytes (a stable fingerprint).

    Original bug: ``hashlib.mda`` (no such attribute) — fixed to ``hashlib.md5``;
    the parameter name is restored so the body's ``image`` is defined.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCamelCase_(unittest.TestCase):
    """Pipeline tests for depth estimation (method names restored so the
    pipeline-test harness and unittest discovery can find them)."""

    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
43
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it provides. Consumed by _LazyModule so
# torch/TF are only imported on first attribute access.
# Original bug: each optional section *rebound* the whole variable (and the
# final _LazyModule call referenced an undefined name); restored to keyed
# assignments into a single dict.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Mirror of the lazy structure for static type checkers only.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle, one element at a time.

    Returns a list of rows, e.g. ``[[1], [1, 1], [1, 2, 1]]`` for 3 rows.

    Raises:
        TypeError: if ``num_rows`` is not an int.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row ``current_row_idx`` from the rows already in ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set one interior element as the sum of the two elements above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    # Original bug: the sum was assigned to a throwaway local, leaving -1 in place.
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting the palindromic symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Time both implementations for a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
130
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Count of rows whose argmax prediction matches the label."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Load a ROCStories CSV into a list of (story, cont1, cont2, label) tuples."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn encoded (story, cont1, cont2, label) tuples into tensors.

    Each example becomes two candidate sequences of the form
    ``[start] story [delim] continuation [clf]``, padded to ``input_len``.
    Returns one tuple of tensors (input_ids, mc_token_ids, lm_labels, mc_labels)
    per dataset.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is the ignore_index for the LM loss on padding positions.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # The multiple-choice head classifies at the last (clf) token.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Fine-tune OpenAI GPT's double-heads model on the ROCStories task."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize/encode strings inside nested containers."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
62
0
"""Lazy import structure for the LUKE model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it provides; consumed by _LazyModule.
# Original bug: the torch-dependent list *rebound* the variable instead of
# keying into the dict, and the final _LazyModule call used an undefined name.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]


if TYPE_CHECKING:
    # Mirror of the lazy structure for static type checkers only.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
53
def solution(min_total: int = 10**12) -> int:
    """Project Euler 100: smallest number of blue discs in a box holding more
    than ``min_total`` discs so that P(drawing two blue) is exactly 1/2.

    Exact solutions follow a Pell-style recurrence on the convergents of
    sqrt(2). Throughout, ``numerator == 2 * total - 1`` and
    ``denominator == 2 * blue - 1`` for each exact solution, so we iterate
    until the total disc count exceeds ``min_total``.

    (The original named this ``_UpperCAmelCase`` while ``__main__`` called
    ``solution()`` — a NameError; the name is restored.)
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    # Advance to the next exact solution until total > min_total.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    # blue = (denominator + 1) / 2
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
62
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self : List[Any] , _lowercase : List[str] , _lowercase : int=7 , _lowercase : Union[str, Any]=3 , _lowercase : Dict=30 , _lowercase : int=4_00 , _lowercase : Tuple=True , _lowercase : List[Any]=None , _lowercase : Any=True , _lowercase : List[Any]=[0.5, 0.5, 0.5] , _lowercase : Union[str, Any]=[0.5, 0.5, 0.5] , _lowercase : List[Any]=True , _lowercase : int=1 / 2_55 , _lowercase : Optional[int]=True , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33} SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = num_channels SCREAMING_SNAKE_CASE__ = min_resolution SCREAMING_SNAKE_CASE__ = max_resolution SCREAMING_SNAKE_CASE__ = do_resize SCREAMING_SNAKE_CASE__ = size SCREAMING_SNAKE_CASE__ = do_normalize SCREAMING_SNAKE_CASE__ = image_mean SCREAMING_SNAKE_CASE__ = image_std SCREAMING_SNAKE_CASE__ = do_rescale SCREAMING_SNAKE_CASE__ = rescale_factor SCREAMING_SNAKE_CASE__ = do_pad def __a ( self : int ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __a ( self : List[Any] , _lowercase : str , _lowercase : Optional[int]=False ): """simple docstring""" if not batched: SCREAMING_SNAKE_CASE__ = image_inputs[0] if isinstance(A_ , Image.Image 
): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.size else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE__ = int(self.size["""shortest_edge"""] * h / w ) SCREAMING_SNAKE_CASE__ = self.size["""shortest_edge"""] elif w > h: SCREAMING_SNAKE_CASE__ = self.size["""shortest_edge"""] SCREAMING_SNAKE_CASE__ = int(self.size["""shortest_edge"""] * w / h ) else: SCREAMING_SNAKE_CASE__ = self.size["""shortest_edge"""] SCREAMING_SNAKE_CASE__ = self.size["""shortest_edge"""] else: SCREAMING_SNAKE_CASE__ = [] for image in image_inputs: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE__ = max(A_ , key=lambda _lowercase : item[0] )[0] SCREAMING_SNAKE_CASE__ = max(A_ , key=lambda _lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __snake_case ( A_ , unittest.TestCase ): lowerCAmelCase_ = ConditionalDetrImageProcessor if is_vision_available() else None def __a ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ConditionalDetrImageProcessingTester(self ) @property def __a ( self : List[Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , """image_mean""" ) ) self.assertTrue(hasattr(A_ , """image_std""" ) ) self.assertTrue(hasattr(A_ , """do_normalize""" ) ) self.assertTrue(hasattr(A_ , """do_resize""" ) ) self.assertTrue(hasattr(A_ , """size""" ) ) def __a ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} ) 
self.assertEqual(image_processor.do_pad , A_ ) SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , A_ ) def __a ( self : Dict ): """simple docstring""" pass def __a ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) SCREAMING_SNAKE_CASE__ = image_processing(A_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __a ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 
self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ = image_processing(A_ , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ = image_processing(A_ , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __a ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.loads(f.read() ) SCREAMING_SNAKE_CASE__ = {"""image_id""": 3_97_69, """annotations""": 
target} # encode them SCREAMING_SNAKE_CASE__ = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" ) SCREAMING_SNAKE_CASE__ = image_processing(images=A_ , annotations=A_ , return_tensors="""pt""" ) # verify pixel values SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["""pixel_values"""].shape , A_ ) SCREAMING_SNAKE_CASE__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A_ , atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A_ ) ) # verify boxes SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A_ ) SCREAMING_SNAKE_CASE__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A_ , atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A_ ) ) # verify is_crowd SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A_ ) ) # verify class_labels SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A_ ) ) # verify orig_size SCREAMING_SNAKE_CASE__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A_ ) ) # verify size SCREAMING_SNAKE_CASE__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A_ ) ) @slow def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.loads(f.read() ) SCREAMING_SNAKE_CASE__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target} SCREAMING_SNAKE_CASE__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them SCREAMING_SNAKE_CASE__ = ConditionalDetrImageProcessor(format="""coco_panoptic""" ) SCREAMING_SNAKE_CASE__ = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors="""pt""" ) # verify pixel values SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["""pixel_values"""].shape , A_ ) SCREAMING_SNAKE_CASE__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A_ , atol=1E-4 ) ) # verify area SCREAMING_SNAKE_CASE__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A_ ) ) # verify boxes SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A_ ) SCREAMING_SNAKE_CASE__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A_ , atol=1E-3 ) ) # verify image_id SCREAMING_SNAKE_CASE__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A_ ) ) # verify is_crowd SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A_ ) ) # verify class_labels SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A_ ) ) # verify masks SCREAMING_SNAKE_CASE__ = 82_28_73 
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A_ ) # verify orig_size SCREAMING_SNAKE_CASE__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A_ ) ) # verify size SCREAMING_SNAKE_CASE__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A_ ) )
219
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> list of symbols it exports.
# NOTE(review): every structure in this file was collapsed onto the single
# name `_A`, so each later assignment clobbers the previous one — upstream
# keeps one `_import_structure` dict and adds keys to it; confirm upstream.
_A = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}

# Torch modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMAEForPreTraining',
        'ViTMAELayer',
        'ViTMAEModel',
        'ViTMAEPreTrainedModel',
    ]

# TensorFlow symbols are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        'TFViTMAEForPreTraining',
        'TFViTMAEModel',
        'TFViTMAEPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # NOTE(review): upstream assigns the lazy module to sys.modules[__name__];
    # `_import_structure` is undefined in this obfuscated copy — confirm.
    _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A_ ( A_ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = ShapEImgaImgPipeline UpperCAmelCase_ : int = ["image"] UpperCAmelCase_ : List[str] = ["image"] UpperCAmelCase_ : int = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] UpperCAmelCase_ : List[Any] = False @property def UpperCAmelCase_ ( self : Tuple ) -> Tuple: return 32 @property def UpperCAmelCase_ ( self : Tuple ) -> int: return 32 @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self : Any ) -> Any: return 8 @property def UpperCAmelCase_ ( self : List[str] ) -> int: torch.manual_seed(0 ) UpperCAmelCase : Union[str, Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) UpperCAmelCase : int = CLIPVisionModel(A_ ) return model @property def UpperCAmelCase_ ( self : Any ) -> List[Any]: UpperCAmelCase : Optional[int] = CLIPImageProcessor( crop_size=224 , do_center_crop=A_ , do_normalize=A_ , do_resize=A_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , ) return image_processor @property def UpperCAmelCase_ ( 
self : List[Any] ) -> str: torch.manual_seed(0 ) UpperCAmelCase : List[str] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } UpperCAmelCase : Any = PriorTransformer(**A_ ) return model @property def UpperCAmelCase_ ( self : Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase : Union[str, Any] = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } UpperCAmelCase : Tuple = ShapERenderer(**A_ ) return model def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase : Optional[int] = self.dummy_prior UpperCAmelCase : str = self.dummy_image_encoder UpperCAmelCase : Union[str, Any] = self.dummy_image_processor UpperCAmelCase : Union[str, Any] = self.dummy_renderer UpperCAmelCase : List[str] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=A_ , clip_sample=A_ , clip_sample_range=1.0 , ) UpperCAmelCase : List[Any] = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict , lowercase_ : int=0 ) -> Any: UpperCAmelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) if str(A_ ).startswith('mps' ): UpperCAmelCase : Tuple = torch.manual_seed(A_ ) else: UpperCAmelCase : Any = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCAmelCase : 
Optional[int] = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def UpperCAmelCase_ ( self : Union[str, Any] ) -> str: UpperCAmelCase : List[Any] = 'cpu' UpperCAmelCase : Tuple = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A_ ) UpperCAmelCase : Any = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCAmelCase : int = pipe(**self.get_dummy_inputs(A_ ) ) UpperCAmelCase : Optional[Any] = output.images[0] UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCAmelCase : int = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase_ ( self : Any ) -> int: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase : Optional[int] = torch_device == 'cpu' UpperCAmelCase : Tuple = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=A_ , relax_max_difference=A_ , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: UpperCAmelCase : Tuple = self.get_dummy_components() UpperCAmelCase : List[Any] = self.pipeline_class(**A_ ) UpperCAmelCase : Optional[Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = 2 UpperCAmelCase : str = self.get_dummy_inputs(A_ ) for key in inputs.keys(): if key in self.batch_params: UpperCAmelCase : Any = batch_size * [inputs[key]] UpperCAmelCase : Optional[Any] = pipe(**A_ , num_images_per_prompt=A_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A_ ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( 
self : Optional[int] ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Optional[int] ) -> int: UpperCAmelCase : Any = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) UpperCAmelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) UpperCAmelCase : List[str] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) UpperCAmelCase : List[Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCAmelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(0 ) UpperCAmelCase : Tuple = pipe( A_ , generator=A_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(A_ , A_ )
151
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: submodule name -> list of symbols it exports.
# NOTE(review): both structures in this file were collapsed onto the single
# name `_A`, so the list below clobbers this dict — upstream keeps one
# `_import_structure` dict and adds a "modeling_jukebox" key; confirm.
_A = {
    'configuration_jukebox': [
        'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'JukeboxConfig',
        'JukeboxPriorConfig',
        'JukeboxVQVAEConfig',
    ],
    'tokenization_jukebox': ['JukeboxTokenizer'],
}

# Torch modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'JukeboxModel',
        'JukeboxPreTrainedModel',
        'JukeboxVQVAE',
        'JukeboxPrior',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # NOTE(review): upstream assigns the lazy module to sys.modules[__name__];
    # `_import_structure` is undefined in this obfuscated copy — confirm.
    _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
"""Console demonstration of binary-tree traversals, recursive and iterative.

Fixes obfuscation damage: the node class was renamed `_lowercase` while the
code constructs `TreeNode`, and all traversal functions collided on one name
while calling each other by their real names. Names are restored so the
module is importable and runnable again.
"""
from __future__ import annotations

import queue


class TreeNode:
    """A binary-tree node holding an integer payload and two child links."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    """Interactively build a tree in breadth-first order from console input.

    Entering ``n`` (or nothing) for any child stops input and returns the root.
    """
    print("""\n********Press N to stop entering at any point of time********\n""")
    check = input("""Enter the value of the root node: """).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # Unreachable in practice: the loop only exits via the early returns above.
    raise


def pre_order(node: TreeNode) -> None:
    """Print root, left subtree, right subtree (comma separated, no newline)."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=""",""")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Print left subtree, root, right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=""",""")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Print left subtree, right subtree, root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=""",""")


def level_order(node: TreeNode) -> None:
    """Print nodes level by level using a FIFO queue (breadth-first)."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=""",""")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first print with a newline after each level of the tree."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # drain every node of the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=""",""")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:  # enqueue the next level
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=""",""")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=""",""")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka: list[TreeNode] = []
    stackb: list[TreeNode] = []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=""",""")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return ``s`` centred in a ``width``-wide banner of ``char``."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"""{left * char} {s} {(left + extra) * char}"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt('''Binary Tree Traversals'''))

    node = build_tree()
    print(prompt('''Pre Order Traversal'''))
    pre_order(node)
    print(prompt() + '''\n''')

    print(prompt('''In Order Traversal'''))
    in_order(node)
    print(prompt() + '''\n''')

    print(prompt('''Post Order Traversal'''))
    post_order(node)
    print(prompt() + '''\n''')

    print(prompt('''Level Order Traversal'''))
    level_order(node)
    print(prompt() + '''\n''')

    print(prompt('''Actual Level Order Traversal'''))
    level_order_actual(node)
    print('''*''' * 50 + '''\n''')

    print(prompt('''Pre Order Traversal - Iteration Version'''))
    pre_order_iter(node)
    print(prompt() + '''\n''')

    print(prompt('''In Order Traversal - Iteration Version'''))
    in_order_iter(node)
    print(prompt() + '''\n''')

    print(prompt('''Post Order Traversal - Iteration Version'''))
    post_order_iter(node)
    print(prompt())
229
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Lazy-import structure: submodule name -> list of symbols it exports.
# NOTE(review): upstream names this dict `_import_structure`, which the
# _LazyModule call below still references; confirm against upstream.
_A = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    # Static type checkers see the real import; at runtime the module is lazy.
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # NOTE(review): upstream assigns the lazy module to sys.modules[__name__];
    # binding it to a plain variable defeats the lazy indirection — confirm.
    _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A_ ( A_ ): _lowerCamelCase : List[str] = (DEISMultistepScheduler,) _lowerCamelCase : List[Any] = (("num_inference_steps", 25),) def lowercase ( self : Any , **snake_case_ : Optional[Any] ): _UpperCAmelCase = { "num_train_timesteps": 1_0_0_0, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, } config.update(**A_ ) return config def lowercase ( self : Tuple , snake_case_ : List[str]=0 , **snake_case_ : int ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , A_ ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**A_ ) _UpperCAmelCase = scheduler_class(**A_ ) scheduler.set_timesteps(A_ ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A_ ) _UpperCAmelCase = scheduler_class.from_pretrained(A_ ) new_scheduler.set_timesteps(A_ ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(A_ , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample _UpperCAmelCase = new_scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase ( self : Any ): pass def lowercase ( self : List[Any] , snake_case_ : Any=0 , **snake_case_ : int ): _UpperCAmelCase = 
dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , A_ ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**A_ ) scheduler.set_timesteps(A_ ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A_ ) _UpperCAmelCase = scheduler_class.from_pretrained(A_ ) # copy over dummy past residuals new_scheduler.set_timesteps(A_ ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample _UpperCAmelCase = new_scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase ( self : str , snake_case_ : str=None , **snake_case_ : Union[str, Any] ): if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**A_ ) _UpperCAmelCase = scheduler_class(**A_ ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**A_ ) _UpperCAmelCase = scheduler_class(**A_ ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(A_ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(A_ , A_ ) _UpperCAmelCase = scheduler.step(A_ , A_ , A_ ).prev_sample return sample def lowercase ( self : Optional[int] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , A_ ) for scheduler_class in self.scheduler_classes: _UpperCAmelCase = 
self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**A_ ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(A_ , "set_timesteps" ): scheduler.set_timesteps(A_ ) elif num_inference_steps is not None and not hasattr(A_ , "set_timesteps" ): _UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] _UpperCAmelCase = scheduler.timesteps[5] _UpperCAmelCase = scheduler.timesteps[6] _UpperCAmelCase = scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample _UpperCAmelCase = scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowercase ( self : Dict ): # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=A_ ) _UpperCAmelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=A_ ) _UpperCAmelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def lowercase ( self : Any ): for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=A_ ) def lowercase ( self : List[str] ): self.check_over_configs(thresholding=A_ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in 
["epsilon", "sample"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , algorithm_type="deis" , solver_order=A_ , solver_type=A_ , ) def lowercase ( self : Tuple ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def lowercase ( self : Dict ): for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=A_ , solver_type=A_ , prediction_type=A_ , algorithm_type=A_ , ) _UpperCAmelCase = self.full_loop( solver_order=A_ , solver_type=A_ , prediction_type=A_ , algorithm_type=A_ , ) assert not torch.isnan(A_ ).any(), "Samples have nan numbers" def lowercase ( self : Union[str, Any] ): self.check_over_configs(lower_order_final=A_ ) self.check_over_configs(lower_order_final=A_ ) def lowercase ( self : Optional[int] ): for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=A_ , time_step=0 ) def lowercase ( self : Any ): _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.full_loop(prediction_type="v_prediction" ) _UpperCAmelCase = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def lowercase ( self : Dict ): _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=A_ , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**A_ ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(A_ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(A_ , A_ ) _UpperCAmelCase = scheduler.step(A_ , A_ , A_ ).prev_sample assert sample.dtype == torch.floataa
22
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class UpperCAmelCase__ ( A_ ): """simple docstring""" def _a ( self , A_ ) -> float: return 0.0 def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) __UpperCamelCase =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =5_12 __UpperCamelCase =[1] + [0] * (size - 1) __UpperCamelCase =[filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs] __UpperCamelCase =[0] * (samplerate - size) # zero-padding outputs += filler __UpperCamelCase =np.abs(np.fft.fft(SCREAMING_SNAKE_CASE__ ) ) __UpperCamelCase =20 * np.logaa(SCREAMING_SNAKE_CASE__ ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('Frequency (Hz)' ) plt.xscale('log' ) # Display within reasonable bounds __UpperCamelCase =get_bounds(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('Gain (dB)' ) plt.plot(SCREAMING_SNAKE_CASE__ ) plt.show() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =5_12 __UpperCamelCase =[1] + [0] * (size - 1) __UpperCamelCase =[filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs] __UpperCamelCase =[0] * (samplerate - size) # zero-padding outputs += filler __UpperCamelCase =np.angle(np.fft.fft(SCREAMING_SNAKE_CASE__ ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('Frequency (Hz)' ) plt.xscale('log' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('Phase shift (Radians)' ) plt.plot(np.unwrap(SCREAMING_SNAKE_CASE__ , -2 * pi ) ) plt.show()
62
0
# Sample directed graphs as adjacency lists (vertex -> list of successors).
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """DFS from *vert*; return vertices in increasing finish-time order.

    Mutates *visited* in place so repeated calls share one traversal state.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)  # vert finishes after all its descendants
    return order


def find_components(
    reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """Collect every vertex reachable from *vert* in the reversed graph (one SCC)."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of *graph* (Kosaraju's algorithm).

    Vertices must be 0..n-1.  First pass: DFS finish order on the original
    graph.  Second pass: DFS on the reversed graph in reverse finish order;
    each tree found is one component.
    """
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph (all edges reversed).
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]  # latest-finishing unvisited vertex first
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
12
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it exposes; consumed by _LazyModule below.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

# The modeling submodule is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real (eager) imports so static type checkers see the concrete symbols.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCAmelCase ( A_): _lowercase : Optional[int] = "bart" _lowercase : List[Any] = ["past_key_values"] _lowercase : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , lowerCAmelCase__=5_0_2_6_5 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=1_2 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=1_6 , lowerCAmelCase__=1_2 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=1_6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=0.0 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=3 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=2 , lowerCAmelCase__=2 , **lowerCAmelCase__ , ) -> Optional[int]: '''simple docstring''' a__ : List[str] =vocab_size a__ : List[str] =max_position_embeddings a__ : Tuple =d_model a__ : Union[str, Any] =encoder_ffn_dim a__ : Union[str, Any] =encoder_layers a__ : str =encoder_attention_heads a__ : Tuple =decoder_ffn_dim a__ : Tuple =decoder_layers a__ : List[Any] =decoder_attention_heads a__ : Optional[Any] =dropout a__ : Tuple =attention_dropout a__ : Any =activation_dropout a__ : List[str] =activation_function a__ : Dict 
=init_std a__ : str =encoder_layerdrop a__ : Any =decoder_layerdrop a__ : Tuple =classifier_dropout a__ : Optional[Any] =use_cache a__ : int =encoder_layers a__ : Tuple =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ): a__ : Optional[int] =self.bos_token_id warnings.warn( F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' "The config can simply be saved and uploaded again to be fixed." ) class __lowerCAmelCase ( A_): @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a__ : List[str] =OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: a__ : int ={0: "batch"} a__ : List[str] ={0: "batch", 1: "past_decoder_sequence + sequence"} else: a__ : Tuple ={0: "batch", 1: "decoder_sequence"} a__ : List[str] ={0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
a__ : List[Any] =OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: a__ , a__ : Tuple =self.num_layers for i in range(A_ ): a__ : int ={0: "batch", 2: "past_sequence + sequence"} a__ : Any ={0: "batch", 2: "past_sequence + sequence"} else: a__ : Optional[int] =OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a__ : Any =super().outputs else: a__ : Any =super(A_ , self ).outputs if self.use_past: a__ , a__ : List[Any] =self.num_layers for i in range(A_ ): a__ : Tuple ={0: "batch", 2: "past_sequence + sequence"} a__ : List[Any] ={0: "batch", 2: "past_sequence + sequence"} return common_outputs def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]: '''simple docstring''' a__ : Optional[int] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) # Generate decoder inputs a__ : Optional[int] =seq_length if not self.use_past else 1 a__ : Any =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) a__ : Optional[Any] ={F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} a__ : List[str] =dict(**A_ , **A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch a__ , a__ : Tuple =common_inputs["input_ids"].shape a__ : Tuple =common_inputs["decoder_input_ids"].shape[1] a__ , a__ : int =self.num_attention_heads a__ : Union[str, Any] =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a__ : Dict =decoder_seq_length + 3 a__ : Any =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a__ : int =torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 ) a__ : Any =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered a__ , a__ : Any =self.num_layers a__ : Tuple =min(A_ , A_ ) a__ : Optional[int] =max(A_ , A_ ) - min_num_layers a__ : Union[str, Any] ="encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(A_ ): common_inputs["past_key_values"].append( ( torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), ) ) # TODO: test this. a__ : List[str] =encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(A_ , A_ ): common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) ) return common_inputs def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]: '''simple docstring''' a__ : Dict =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch a__ , a__ : int =common_inputs["input_ids"].shape # Not using the same length for past_key_values a__ : Optional[int] =seqlen + 2 a__ , a__ : Dict =self.num_layers a__ , a__ : Optional[int] =self.num_attention_heads a__ : Any =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a__ : str =common_inputs["attention_mask"].dtype a__ : Tuple =torch.cat( [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) a__ : Optional[int] =[ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ ) ] return common_inputs def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]: '''simple docstring''' a__ : int =compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a__ : Tuple =tokenizer.num_special_tokens_to_add(A_ ) a__ : str =compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence a__ : int =[" ".join([tokenizer.unk_token] ) * seq_length] * batch_size a__ : Union[str, Any] =dict(tokenizer(A_ , return_tensors=A_ ) ) return common_inputs def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a__ : List[str] =self._generate_dummy_inputs_for_default_and_seqaseq_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) elif self.task == "causal-lm": a__ : Tuple =self._generate_dummy_inputs_for_causal_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) else: a__ : Any 
=self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) return common_inputs def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: a__ : int =super()._flatten_past_key_values_(A_ , A_ , A_ , A_ ) else: a__ : Optional[Any] =super(A_ , self )._flatten_past_key_values_( A_ , A_ , A_ , A_ )
95
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration for MVP encoder-decoder models.

    Stores the hyperparameters used to instantiate an MVP model; defaults
    match the RUCAIBox/mvp checkpoint.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Prompt-tuning settings specific to MVP.
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Backward compatibility: old configs used a boolean flag instead of
        # an explicit forced_bos_token_id.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
62
0
def get_demo_graph(index):
    """Return one of four hard-coded undirected graphs (adjacency lists) for demos/tests."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph):
    """Return all bridges (cut edges) of an undirected graph via Tarjan's low-link DFS.

    *graph* maps vertex 0..n-1 to its neighbour list.  Each bridge is returned
    as a tuple with the smaller endpoint first, in discovery order.
    """
    id_ = 0
    n = len(graph)  # number of vertices
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass  # skip the tree edge back to the parent
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                # (at, to) is a bridge iff the subtree under `to` has no back
                # edge reaching `at` or above: low[to] > disc(at) == id_ - 1.
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # Back edge to an ancestor: update the low-link value.
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
209
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = GPTaTokenizer UpperCAmelCase__ : Any = GPTaTokenizerFast UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : int = {"add_prefix_space": True} UpperCAmelCase__ : Any = False def _a ( self ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase =[ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) ) __UpperCamelCase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase ={'unk_token': '<unk>'} __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _a ( self , **A_ ) -> str: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , **A_ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase ='lower newer' __UpperCamelCase ='lower newer' return input_text, output_text def _a ( self ) -> List[Any]: __UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) 
__UpperCamelCase ='lower newer' __UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) self.assertListEqual(A_ , A_ ) __UpperCamelCase =tokens + [tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self ) -> int: if not self.test_rust_tokenizer: return __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase ='lower newer' # Testing tokenization __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids without special tokens __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids with special tokens __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # Testing the unknown token __UpperCamelCase =tokens + [rust_tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self , *A_ , **A_ ) -> Optional[int]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def _a ( self , A_=15 ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This 
is a simple input 2'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) def _a ( self ) -> int: __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __UpperCamelCase =tokenizer.pad_token_id __UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) __UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding 
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase ='$$$' __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ ) __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This is a simple input 2'] __UpperCamelCase =tokenizer.bos_token_id __UpperCamelCase =tokenizer(A_ ) __UpperCamelCase =tokenizer(A_ ) self.assertEqual(out_s.input_ids[0] , A_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __UpperCamelCase =tokenizer.decode(out_s.input_ids ) __UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def _a ( self ) -> Optional[int]: pass def _a ( self ) -> Any: # TODO: change to self.get_tokenizers() when the fast version is implemented __UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): __UpperCamelCase ='Encode this.' 
__UpperCamelCase ='This one too please.' __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ ) encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ ) __UpperCamelCase =tokenizer.encode_plus( A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , ) __UpperCamelCase =encoded_sequence_dict['input_ids'] __UpperCamelCase =encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(A_ ) , len(A_ ) ) __UpperCamelCase =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ ) ] __UpperCamelCase =[x for x in filtered_sequence if x is not None] self.assertEqual(A_ , A_ ) @require_tokenizers class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> Optional[Any]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('test_opt' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) def _a ( self ) -> Dict: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # Same as above self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def _a ( self ) -> List[Any]: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='bos' __UpperCamelCase =tokenizer.get_vocab()['bos'] __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # We 
changed the bos token self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('./tok' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
62
0
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def _a ( lowerCamelCase: Optional[Any] , lowerCamelCase: List[str] , lowerCamelCase: str=[] ) -> int: '''simple docstring''' __A = size[0] - overlap_pixels * 2 __A = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels __A = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55 __A = np.pad(SCREAMING_SNAKE_CASE__ , mode='''linear_ramp''' , pad_width=SCREAMING_SNAKE_CASE__ , end_values=0 ) if "l" in remove_borders: __A = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: __A = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: __A = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: __A = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def _a ( lowerCamelCase: Tuple , lowerCamelCase: List[str] , lowerCamelCase: List[str] ) -> Optional[Any]: '''simple docstring''' return max(SCREAMING_SNAKE_CASE__ , min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) def _a ( lowerCamelCase: [int] , lowerCamelCase: [int] , lowerCamelCase: [int] ) -> Any: '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def _a ( lowerCamelCase: [int] , lowerCamelCase: int , lowerCamelCase: [int] ) -> Tuple: '''simple docstring''' __A = list(SCREAMING_SNAKE_CASE__ ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += 
overlap __A = clamp_rect(SCREAMING_SNAKE_CASE__ , [0, 0] , [image_size[0], image_size[1]] ) return rect def _a ( lowerCamelCase: Tuple , lowerCamelCase: Any , lowerCamelCase: Any , lowerCamelCase: Optional[int] ) -> Any: '''simple docstring''' __A = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(SCREAMING_SNAKE_CASE__ , (original_slice, 0) ) return result def _a ( lowerCamelCase: int , lowerCamelCase: Dict ) -> Tuple: '''simple docstring''' __A = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) __A = tile.crop(SCREAMING_SNAKE_CASE__ ) return tile def _a ( lowerCamelCase: Tuple , lowerCamelCase: Union[str, Any] ) -> List[Any]: '''simple docstring''' __A = n % d return n - divisor class A_ ( A_ ): def __init__(self :List[Any] , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :List[str] , _UpperCamelCase :str , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :List[Any] = 350 , )-> Union[str, Any]: super().__init__( vae=A_ , text_encoder=A_ , tokenizer=A_ , unet=A_ , low_res_scheduler=A_ , scheduler=A_ , max_noise_level=A_ , ) def _lowerCAmelCase (self :Dict , _UpperCamelCase :Any , _UpperCamelCase :int , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :int , _UpperCamelCase :Tuple , _UpperCamelCase :Tuple , _UpperCamelCase :List[str] , **_UpperCamelCase :str )-> Union[str, Any]: torch.manual_seed(0 ) __A = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) __A = add_overlap_rect(A_ , A_ , image.size ) __A = image.crop(A_ ) __A = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * 
tile.size[0] __A = translated_slice_x - (original_image_slice / 2) __A = max(0 , A_ ) __A = squeeze_tile(A_ , A_ , A_ , A_ ) __A = to_input.size __A = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) __A = super(A_ , self ).__call__(image=A_ , **A_ ).images[0] __A = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) __A = unsqueeze_tile(A_ , A_ ) __A = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) __A = [] if x == 0: remove_borders.append('''l''' ) elif crop_rect[2] == image.size[0]: remove_borders.append('''r''' ) if y == 0: remove_borders.append('''t''' ) elif crop_rect[3] == image.size[1]: remove_borders.append('''b''' ) __A = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=A_ ) , mode='''L''' , ) final_image.paste( A_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , A_ ) @torch.no_grad() def __call__(self :Union[str, Any] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Optional[int] , _UpperCamelCase :str = 75 , _UpperCamelCase :Tuple = 9.0 , _UpperCamelCase :int = 50 , _UpperCamelCase :Union[str, Any] = None , _UpperCamelCase :Tuple = 1 , _UpperCamelCase :int = 0.0 , _UpperCamelCase :List[str] = None , _UpperCamelCase :List[str] = None , _UpperCamelCase :Union[str, Any] = None , _UpperCamelCase :str = 1 , _UpperCamelCase :Union[str, Any] = 128 , _UpperCamelCase :int = 32 , _UpperCamelCase :Optional[Any] = 32 , )-> Tuple: __A = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) ) __A = math.ceil(image.size[0] / tile_size ) __A = math.ceil(image.size[1] / tile_size ) __A = tcx * tcy __A = 0 for y in range(A_ ): for x in range(A_ ): self._process_tile( A_ , A_ , A_ , A_ , A_ , A_ , A_ , prompt=A_ , num_inference_steps=A_ , guidance_scale=A_ , noise_level=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , ) current_count += 1 if callback is not 
None: callback({'''progress''': current_count / total_tile_count, '''image''': final_image} ) return final_image def _a ( ) -> int: '''simple docstring''' __A = '''stabilityai/stable-diffusion-x4-upscaler''' __A = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE__ , revision='''fp16''' , torch_dtype=torch.floataa ) __A = pipe.to('''cuda''' ) __A = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' ) def callback(lowerCamelCase: List[str] ): print(F"""progress: {obj['progress']:.4f}""" ) obj["image"].save('''diffusers_library_progress.jpg''' ) __A = pipe(image=SCREAMING_SNAKE_CASE__ , prompt='''Black font, white background, vector''' , noise_level=40 , callback=SCREAMING_SNAKE_CASE__ ) final_image.save('''diffusers_library.jpg''' ) if __name__ == "__main__": main()
117
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A real-valued vector backed by a plain Python list.

    Supports +, -, scalar multiplication and the dot product via operator
    overloading; raises ``Exception`` on size mismatches, mirroring the
    module's established error style.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        # Default to the empty vector; copy the input so callers cannot
        # mutate our internal state through their reference.
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Return the dimension (number of components)."""
        return len(self.__components)

    def __str__(self) -> str:
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise addition; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            return Vector([self.__components[i] + other.component(i) for i in range(size)])
        raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise subtraction; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            return Vector([self.__components[i] - other.component(i) for i in range(size)])
        raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication, or the dot product when *other* is a Vector."""
        if isinstance(other, (float, int)):
            return Vector([c * other for c in self.__components])
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            return sum(self.__components[i] * other.component(i) for i in range(size))
        raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return an independent copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return component *i*; negative indices are allowed, as for lists."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set component *pos* to *value* (bounds checked via assert)."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the 2-norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        return math.sqrt(sum(c**2 for c in self.__components))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle to *other* in radians (degrees when ``deg``)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector e_pos of the given dimension."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return ``scalar * x + y`` (the BLAS "axpy" operation)."""
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return an n-dimensional vector of random ints in [a, b], seeded with n."""
    random.seed(n)
    return Vector([random.randint(a, b) for _ in range(n)])


class Matrix:
    """A ``w`` x ``h`` real matrix stored as a list of row lists."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        # The row data is stored by reference; width/height are trusted
        # to match the data (as in the original implementation).
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render each row as ``|a,b,...,z|`` on its own line."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Element-wise addition; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [self.__matrix[i][j] + other.component(i, j) for j in range(self.__width)]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Element-wise subtraction; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [self.__matrix[i][j] - other.component(i, j) for j in range(self.__width)]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product, or matrix-scalar product."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j) for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            raise Exception(
                "vector must have the same size as the "
                "number of columns of the matrix!"
            )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        # Unsupported operand type: preserved original behavior of returning None.
        return None

    def height(self) -> int:
        """Return the number of rows."""
        return self.__height

    def width(self) -> int:
        """Return the number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return element (x, y); raises on out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set element (x, y) to *value*; raises on out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Return the minor: determinant with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        # Slicing builds new row lists, so the original matrix is untouched.
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Return the signed minor (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Return the determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix of random ints in [a, b], seeded with width."""
    random.seed(width)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
62
0
from __future__ import annotations class lowerCamelCase_ : '''simple docstring''' def __init__( self , __lowercase) -> None: __UpperCamelCase :str = data __UpperCamelCase :Any = None __UpperCamelCase :Optional[int] = None def lowerCamelCase ( SCREAMING_SNAKE_CASE ): # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowerCamelCase ( ): # Main function for testing. '''simple docstring''' __UpperCamelCase :List[Any] = Node(1 ) __UpperCamelCase :List[Any] = Node(2 ) __UpperCamelCase :List[str] = Node(3 ) __UpperCamelCase :int = Node(4 ) __UpperCamelCase :Optional[Any] = Node(5 ) __UpperCamelCase :int = Node(6 ) __UpperCamelCase :int = Node(7 ) __UpperCamelCase :Any = Node(8 ) __UpperCamelCase :str = Node(9 ) print(is_full_binary_tree(SCREAMING_SNAKE_CASE__ ) ) print(depth_of_tree(SCREAMING_SNAKE_CASE__ ) ) print('''Tree is: ''' ) display(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
43
_A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} _A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ): __UpperCamelCase =True __UpperCamelCase =[] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) order.append(SCREAMING_SNAKE_CASE__ ) return order def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ): __UpperCamelCase =True __UpperCamelCase =[vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return component def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] ): __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False] __UpperCamelCase ={vert: [] for vert in range(len(SCREAMING_SNAKE_CASE__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[] for i, was_visited in enumerate(SCREAMING_SNAKE_CASE__ ): if not was_visited: order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[] __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): __UpperCamelCase =order[len(SCREAMING_SNAKE_CASE__ ) - i - 1] if not visited[vert]: __UpperCamelCase =find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) components_list.append(SCREAMING_SNAKE_CASE__ ) return components_list
62
0
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


# Test-suite class for the Lxmert slow/fast tokenizers.
# NOTE(review): identifiers in this class were mechanically renamed
# (`A_`, repeated `lowercase_` / `snake_case` / `lowercase__`); several
# references (e.g. `A_`, `vocab_tokens`, `self.vocab_file`, `tokenizer`,
# `input_text`) no longer match an assignment — confirm against the
# upstream transformers test file before running.
@require_tokenizers
class snake_case__(A_ , unittest.TestCase ):
    """simple docstring"""

    # Tokenizer classes under test plus mixin switches; the four
    # assignments all target the same (renamed) attribute name.
    lowercase_ = LxmertTokenizer
    lowercase_ = LxmertTokenizerFast
    lowercase_ = True
    lowercase_ = True

    def snake_case ( self : Any ):
        # Write a tiny WordPiece vocab file into the mixin's tmp dir.
        super().setUp()

        lowercase__ : str = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Dict ):
        # Provide an (input, expected-output) text pair for the common tests.
        lowercase__ : List[Any] = "UNwant\u00E9d,running"
        lowercase__ : Optional[Any] = "unwanted, running"
        return input_text, output_text

    def snake_case ( self : Dict ):
        # Full tokenization round-trip against the tiny vocab above.
        lowercase__ : Optional[int] = self.tokenizer_class(self.vocab_file )

        lowercase__ : List[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(A_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [7, 4, 5, 10, 8, 9] )

    def snake_case ( self : int ):
        # Slow and fast tokenizers must agree on tokens and ids.
        if not self.test_rust_tokenizer:
            return

        lowercase__ : Tuple = self.get_tokenizer()
        lowercase__ : str = self.get_rust_tokenizer()

        lowercase__ : str = "I was born in 92000, and this is falsé."

        lowercase__ : str = tokenizer.tokenize(A_ )
        lowercase__ : Union[str, Any] = rust_tokenizer.tokenize(A_ )
        self.assertListEqual(A_ , A_ )

        lowercase__ : str = tokenizer.encode(A_ , add_special_tokens=A_ )
        lowercase__ : List[str] = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
        self.assertListEqual(A_ , A_ )

        lowercase__ : Any = self.get_rust_tokenizer()
        lowercase__ : Any = tokenizer.encode(A_ )
        lowercase__ : Optional[Any] = rust_tokenizer.encode(A_ )
        self.assertListEqual(A_ , A_ )
130
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


# NOTE(review): module-level names were mechanically renamed — all five
# constants below are assigned to the same name `_A`, so only the last
# survives; class attributes reference the original constant names
# (VOCAB_FILES_NAMES, ...). Confirm against the upstream BartPho
# tokenizer file before running.
_A = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
_A = '▁'

_A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

_A = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}

_A = {'vinai/bartpho-syllable': 1024}


# BartPho (Vietnamese BART) tokenizer: a SentencePiece model plus a
# reduced monolingual fairseq vocabulary that remaps token ids.
class UpperCAmelCase__ ( A_ ):
    """simple docstring"""

    UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
    UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ : str = ["input_ids", "attention_mask"]

    def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token

        __UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )

        __UpperCamelCase =vocab_file
        __UpperCamelCase =monolingual_vocab_file
        __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(A_ ) )

        # Load the reduced vocab

        # Keep order of special tokens for backward compatibility
        __UpperCamelCase ={}
        __UpperCamelCase =0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(A_ ) not in self.fairseq_tokens_to_ids:
                __UpperCamelCase =cnt
                cnt += 1
        # First column of each dict.txt line is the token surface form.
        with open(A_ , 'r' , encoding='utf-8' ) as f:
            for line in f.readlines():
                __UpperCamelCase =line.strip().split()[0]
                __UpperCamelCase =len(self.fairseq_tokens_to_ids )
        if str(A_ ) not in self.fairseq_tokens_to_ids:
            __UpperCamelCase =len(self.fairseq_tokens_to_ids )

        __UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self ) -> Any:
        # Drop the unpicklable SentencePiece processor; keep its proto.
        __UpperCamelCase =self.__dict__.copy()
        __UpperCamelCase =None
        __UpperCamelCase =self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , A_ ) -> List[str]:
        __UpperCamelCase =d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __UpperCamelCase ={}

        # Rebuild the SentencePiece processor from the serialized proto.
        __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def _a ( self , A_ , A_ = None ) -> List[int]:
        # Build model inputs: <s> A </s> or <s> A </s></s> B </s>.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __UpperCamelCase =[self.cls_token_id]
        __UpperCamelCase =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
        # Mask with 1 for special tokens, 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )

        if token_ids_a is None:
            return [1] + ([0] * len(A_ )) + [1]
        return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]

    def _a ( self , A_ , A_ = None ) -> List[int]:
        # BartPho, like RoBERTa/BART, does not use token type ids: all zeros.
        __UpperCamelCase =[self.sep_token_id]
        __UpperCamelCase =[self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def _a ( self ) -> Any:
        # Vocabulary size of the *reduced* fairseq vocab, not the SP model.
        return len(self.fairseq_ids_to_tokens )

    def _a ( self ) -> Union[str, Any]:
        __UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _a ( self , A_ ) -> List[str]:
        # Tokenize with SentencePiece, returning string pieces.
        return self.sp_model.encode(A_ , out_type=A_ )

    def _a ( self , A_ ) -> str:
        # Token -> id via the reduced vocab; unknown tokens map to <unk>.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _a ( self , A_ ) -> int:
        # Id -> token via the reduced vocab.
        return self.fairseq_ids_to_tokens[index]

    def _a ( self , A_ ) -> List[Any]:
        # Join pieces and turn the SP boundary marker back into spaces.
        __UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip()
        return out_string

    def _a ( self , A_ , A_ = None ) -> Tuple[str]:
        # Persist both the SentencePiece model and the monolingual vocab.
        if not os.path.isdir(A_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        __UpperCamelCase =os.path.join(
            A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        __UpperCamelCase =os.path.join(
            A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )

        # Copy the original files when available; otherwise serialize from memory.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , A_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(A_ , 'wb' ) as fi:
                __UpperCamelCase =self.sp_model.serialized_model_proto()
                fi.write(A_ )

        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            A_ ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , A_ )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(A_ , 'w' , encoding='utf-8' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(A_ )} \n' )

        return out_vocab_file, out_monolingual_vocab_file
62
0
'''simple docstring'''
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# Model tester: builds tiny ConvNext configs/inputs and runs shape checks.
# NOTE(review): identifiers were mechanically renamed (`A_`, repeated
# `__UpperCamelCase`, classes all named `snake_case`); many references no
# longer match an assignment — confirm against the upstream ConvNext test
# file before running.
class snake_case :
    """simple docstring"""

    def __init__( self : List[str] , __A : Dict , __A : Any=1_3 , __A : Union[str, Any]=3_2 , __A : Dict=3 , __A : List[Any]=4 , __A : str=[1_0, 2_0, 3_0, 4_0] , __A : Tuple=[2, 2, 3, 2] , __A : Optional[Any]=True , __A : Optional[int]=True , __A : Tuple=3_7 , __A : int="gelu" , __A : Optional[int]=1_0 , __A : List[Any]=0.02 , __A : Union[str, Any]=["stage2", "stage3", "stage4"] , __A : Dict=[2, 3, 4] , __A : Union[str, Any]=None , ):
        # Small hyper-parameters keep the test models fast to construct.
        __UpperCamelCase = parent
        __UpperCamelCase = batch_size
        __UpperCamelCase = image_size
        __UpperCamelCase = num_channels
        __UpperCamelCase = num_stages
        __UpperCamelCase = hidden_sizes
        __UpperCamelCase = depths
        __UpperCamelCase = is_training
        __UpperCamelCase = use_labels
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = hidden_act
        __UpperCamelCase = num_labels
        __UpperCamelCase = initializer_range
        __UpperCamelCase = out_features
        __UpperCamelCase = out_indices
        __UpperCamelCase = scope

    def _lowerCamelCase ( self : str ):
        # Random pixel inputs plus optional classification labels.
        __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __UpperCamelCase = None
        if self.use_labels:
            __UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )

        __UpperCamelCase = self.get_config()

        return config, pixel_values, labels

    def _lowerCamelCase ( self : int ):
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def _lowerCamelCase ( self : Tuple , __A : List[Any] , __A : int , __A : List[Any] ):
        __UpperCamelCase = ConvNextModel(config=A_ )
        model.to(A_ )
        model.eval()
        __UpperCamelCase = model(A_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def _lowerCamelCase ( self : Tuple , __A : Union[str, Any] , __A : Optional[int] , __A : Dict ):
        __UpperCamelCase = ConvNextForImageClassification(A_ )
        model.to(A_ )
        model.eval()
        __UpperCamelCase = model(A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowerCamelCase ( self : Any , __A : List[str] , __A : Union[str, Any] , __A : Union[str, Any] ):
        __UpperCamelCase = ConvNextBackbone(config=A_ )
        model.to(A_ )
        model.eval()
        __UpperCamelCase = model(A_ )

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        __UpperCamelCase = None
        __UpperCamelCase = ConvNextBackbone(config=A_ )
        model.to(A_ )
        model.eval()
        __UpperCamelCase = model(A_ )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def _lowerCamelCase ( self : Optional[Any] ):
        # Pack config and pixel inputs into the dict used by the common tests.
        __UpperCamelCase = self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
        __UpperCamelCase = {'pixel_values': pixel_values}
        return config, inputs_dict


# Main ConvNext model test-suite (mixes in the common model/pipeline tests).
@require_torch
class snake_case ( A_ , A_ , unittest.TestCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ : Optional[Any] =(
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE_ : Dict =(
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    SCREAMING_SNAKE_CASE_ : Dict =True
    SCREAMING_SNAKE_CASE_ : Tuple =False
    SCREAMING_SNAKE_CASE_ : Optional[Any] =False
    SCREAMING_SNAKE_CASE_ : List[Any] =False
    SCREAMING_SNAKE_CASE_ : str =False

    def _lowerCamelCase ( self : Tuple ):
        __UpperCamelCase = ConvNextModelTester(self )
        __UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=3_7 )

    def _lowerCamelCase ( self : Dict ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowerCamelCase ( self : Dict ):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def _lowerCamelCase ( self : List[str] ):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def _lowerCamelCase ( self : Optional[int] ):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def _lowerCamelCase ( self : List[Any] ):
        pass

    def _lowerCamelCase ( self : Tuple ):
        # The forward signature's first argument must be `pixel_values`.
        __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __UpperCamelCase = model_class(A_ )
            __UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase = [*signature.parameters.keys()]

            __UpperCamelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , A_ )

    def _lowerCamelCase ( self : str ):
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )

    def _lowerCamelCase ( self : str ):
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*A_ )

    def _lowerCamelCase ( self : Any ):
        # One hidden state per stage plus the stem output.
        def check_hidden_states_output(__A : Optional[int] , __A : Optional[int] , __A : List[str] ):
            __UpperCamelCase = model_class(A_ )
            model.to(A_ )
            model.eval()

            with torch.no_grad():
                __UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )

            __UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            __UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(A_ ) , expected_num_stages + 1 )

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __UpperCamelCase = True
            check_hidden_states_output(A_ , A_ , A_ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCamelCase = True

            check_hidden_states_output(A_ , A_ , A_ )

    def _lowerCamelCase ( self : Tuple ):
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A_ )

    @slow
    def _lowerCamelCase ( self : Union[str, Any] ):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase = ConvNextModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )


def lowercase__ ( ) -> Tuple:
    """simple docstring"""
    # Load the standard COCO fixture image used by the integration test.
    __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# Integration test: run a pretrained checkpoint on a real image and
# compare logits against recorded reference values.
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def _lowerCamelCase ( self : List[Any] ):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None

    @slow
    def _lowerCamelCase ( self : str ):
        __UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(A_ )

        __UpperCamelCase = self.default_image_processor
        __UpperCamelCase = prepare_img()
        __UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )

        # forward pass
        with torch.no_grad():
            __UpperCamelCase = model(**A_ )

        # verify the logits
        __UpperCamelCase = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , A_ )

        __UpperCamelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(A_ )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )


# Backbone-specific common tests for ConvNextBackbone.
@require_torch
class snake_case ( unittest.TestCase , A_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ : Optional[Any] =(ConvNextBackbone,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE_ : Optional[Any] =ConvNextConfig

    SCREAMING_SNAKE_CASE_ : Any =False

    def _lowerCamelCase ( self : Optional[int] ):
        __UpperCamelCase = ConvNextModelTester(self )
53
from numpy import exp, pi, sqrt def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
62
0
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

# Number of examples for the "full" speed test and the smaller formatted test.
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    """Read `length` examples one at a time by integer index."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """Read the dataset in contiguous slices of `batch_size` examples."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    """Read `length` examples one at a time under an output format (numpy/pandas/torch/...)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    """Read the first `length` examples in batches under an output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time the read functions above (plain and
    after shuffling), and dump the timings to RESULTS_FILE_PATH as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_00}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10_00}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10_00}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_00}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10_00}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10_00}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (1_00,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # Key encodes the function and its argument values, e.g. "read_batch 50000 10".
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
219
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _A = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ["pixel_values"] def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ) -> None: super().__init__(**A_ ) __UpperCamelCase =size if size is not None else {'shortest_edge': 224} __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ ) __UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224} __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' ) __UpperCamelCase =do_resize __UpperCamelCase =size __UpperCamelCase =resample __UpperCamelCase =do_center_crop __UpperCamelCase =crop_size __UpperCamelCase =do_rescale __UpperCamelCase =rescale_factor __UpperCamelCase =do_normalize __UpperCamelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCamelCase =image_std if image_std is not None else OPENAI_CLIP_STD __UpperCamelCase =do_convert_rgb def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray: __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}' ) __UpperCamelCase =get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray: __UpperCamelCase =get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' ) return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ ) def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]: return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray: return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image: __UpperCamelCase =do_resize if do_resize is not None else self.do_resize __UpperCamelCase =size if size is not None else self.size __UpperCamelCase =get_size_dict(A_ , param_name='size' , default_to_square=A_ ) __UpperCamelCase =resample if resample is not None else self.resample __UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase =crop_size if crop_size is not None else self.crop_size __UpperCamelCase =get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ ) __UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase =image_mean if image_mean is not None else self.image_mean __UpperCamelCase =image_std if image_std is not None else self.image_std __UpperCamelCase =do_convert_rgb if do_convert_rgb is not None else 
self.do_convert_rgb __UpperCamelCase =make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCamelCase =[convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. __UpperCamelCase =[to_numpy_array(A_ ) for image in images] if do_resize: __UpperCamelCase =[self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: __UpperCamelCase =[self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: __UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: __UpperCamelCase =[self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] __UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images] __UpperCamelCase ={'pixel_values': images} return BatchFeature(data=A_ , tensor_type=A_ )
62
0
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger(__name__) def UpperCamelCase( UpperCAmelCase_ ): UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('Quantized models are not supported.' ) UpperCAmelCase : Dict = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , SCREAMING_SNAKE_CASE__ ) if matches: UpperCAmelCase : Tuple = float(matches[1] ) UpperCAmelCase : int = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". UpperCAmelCase : Any = 10_01 UpperCAmelCase : Tuple = 'imagenet-1k-id2label.json' UpperCAmelCase : Dict = 'huggingface/label-files' UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) UpperCAmelCase : Union[str, Any] = {int(SCREAMING_SNAKE_CASE__ ) + 1: v for k, v in idalabel.items()} UpperCAmelCase : Dict = 'background' UpperCAmelCase : Union[str, Any] = idalabel UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def UpperCamelCase( ): UpperCAmelCase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCAmelCase : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False ): UpperCAmelCase : Tuple = get_mobilenet_va_config(SCREAMING_SNAKE_CASE__ ) # Load 🤗 model UpperCAmelCase : Any = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ 
).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor UpperCAmelCase : str = MobileNetVaImageProcessor( crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , ) UpperCAmelCase : Optional[Any] = image_processor(images=prepare_img() , return_tensors='pt' ) UpperCAmelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : List[str] = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": UpperCAmelCase : str = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": UpperCAmelCase : Optional[Any] = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: UpperCAmelCase : List[str] = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print('Pushing to the hub...' ) UpperCAmelCase : int = 'google/' + model_name image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ ) model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." 
) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase__ = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
151
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration for a YOLOS (You Only Look at One Sequence) detection model.

    Stores the ViT-style encoder hyperparameters plus the DETR-style
    detection-head settings (Hungarian matcher costs and loss coefficients).
    Unrecognized keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # (height, width); list kept for checkpoint/serialization compatibility
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder (ViT backbone) hyperparameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image/patch embedding settings.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Detection head settings.
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS (single `pixel_values` input)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the image input tensor.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported model outputs.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
62
0
"""Lazy import structure for the DeiT model.

Only the configuration is exposed eagerly; vision preprocessing and the
PyTorch/TensorFlow modeling modules are registered in ``_import_structure``
and loaded on first access through ``_LazyModule``, guarded by the
availability of their optional dependencies.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

# Image preprocessing requires the vision extras (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

# PyTorch modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

# TensorFlow modeling classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only; mirrors _import_structure.
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
229
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy import structure for the ViViT (Video Vision Transformer) model."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

# Video preprocessing requires the vision extras (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

# PyTorch modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only; mirrors _import_structure.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase_ ( __lowercase : List[str] , __lowercase : List[Any] , __lowercase : List[Any] ) -> Dict: '''simple docstring''' return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Any="attention" ) -> List[str]: '''simple docstring''' _UpperCAmelCase = _UpperCAmelCase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) _UpperCAmelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) _UpperCAmelCase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) _UpperCAmelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) _UpperCAmelCase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) _UpperCAmelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) _UpperCAmelCase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) _UpperCAmelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def UpperCAmelCase_ ( __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : List[Any]=False ) -> str: '''simple docstring''' if split_mlp_wi: _UpperCAmelCase = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] _UpperCAmelCase = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] _UpperCAmelCase = (wi_a, wi_a) else: _UpperCAmelCase = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] _UpperCAmelCase = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def 
UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : str , __lowercase : Dict , __lowercase : Optional[Any] ) -> Dict: '''simple docstring''' return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i] def UpperCAmelCase_ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = traverse_util.flatten_dict(variables["target"] ) _UpperCAmelCase = {"/".join(SCREAMING_SNAKE_CASE__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _UpperCAmelCase = "encoder/encoder/mlp/wi_0/kernel" in old print("Split MLP:" , SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase = collections.OrderedDict() # Shared embeddings. _UpperCAmelCase = old["token_embedder/embedding"] # Encoder. for i in range(SCREAMING_SNAKE_CASE__ ): # Block i, layer 0 (Self Attention). _UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , "pre_attention_layer_norm" ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , "attention" ) _UpperCAmelCase = layer_norm _UpperCAmelCase = k.T _UpperCAmelCase = o.T _UpperCAmelCase = q.T _UpperCAmelCase = v.T # Block i, layer 1 (MLP). 
_UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , "pre_mlp_layer_norm" ) _UpperCAmelCase , _UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase = layer_norm if split_mlp_wi: _UpperCAmelCase = wi[0].T _UpperCAmelCase = wi[1].T else: _UpperCAmelCase = wi.T _UpperCAmelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer _UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" ).T _UpperCAmelCase = old["encoder/encoder_norm/scale"] if not scalable_attention: _UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE__ , 0 , "encoder" ).T _UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE__ , 0 , "decoder" ).T if not is_encoder_only: # Decoder. for i in range(SCREAMING_SNAKE_CASE__ ): # Block i, layer 0 (Self Attention). _UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "pre_self_attention_layer_norm" ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "self_attention" ) _UpperCAmelCase = layer_norm _UpperCAmelCase = k.T _UpperCAmelCase = o.T _UpperCAmelCase = q.T _UpperCAmelCase = v.T # Block i, layer 1 (Cross Attention). _UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "pre_cross_attention_layer_norm" ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "encoder_decoder_attention" ) _UpperCAmelCase = layer_norm _UpperCAmelCase = k.T _UpperCAmelCase = o.T _UpperCAmelCase = q.T _UpperCAmelCase = v.T # Block i, layer 2 (MLP). 
_UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "pre_mlp_layer_norm" ) _UpperCAmelCase , _UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase = layer_norm if split_mlp_wi: _UpperCAmelCase = wi[0].T _UpperCAmelCase = wi[1].T else: _UpperCAmelCase = wi.T _UpperCAmelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer _UpperCAmelCase = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" ).T _UpperCAmelCase = old["decoder/decoder_norm/scale"] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _UpperCAmelCase = old["decoder/logits_dense/kernel"].T return new def UpperCAmelCase_ ( __lowercase : int , __lowercase : bool ) -> List[str]: '''simple docstring''' _UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _UpperCAmelCase = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _UpperCAmelCase = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) _UpperCAmelCase = state_dict["shared.weight"] return state_dict def UpperCAmelCase_ ( __lowercase : int , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[str] ) -> str: '''simple docstring''' _UpperCAmelCase = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase = convert_tax_to_pytorch( SCREAMING_SNAKE_CASE__ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE__ , scalable_attention=SCREAMING_SNAKE_CASE__ ) _UpperCAmelCase = make_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : List[Any] , __lowercase : int , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE__ ) print(f'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: _UpperCAmelCase = UMTaEncoderModel(SCREAMING_SNAKE_CASE__ ) else: _UpperCAmelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Verify that we can load the checkpoint. 
model.from_pretrained(SCREAMING_SNAKE_CASE__ ) print("Done" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE :Dict = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) __SCREAMING_SNAKE_CASE :List[str] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
22
from __future__ import annotations import math class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ ) -> None: __UpperCamelCase =size # approximate the overall size of segment tree with given value __UpperCamelCase =[0 for i in range(0 , 4 * size )] # create array to store lazy update __UpperCamelCase =[0 for i in range(0 , 4 * size )] __UpperCamelCase =[0 for i in range(0 , 4 * size )] # flag for lazy update def _a ( self , A_ ) -> int: return idx * 2 def _a ( self , A_ ) -> int: return idx * 2 + 1 def _a ( self , A_ , A_ , A_ , A_ ) -> None: if left_element == right_element: __UpperCamelCase =a[left_element - 1] else: __UpperCamelCase =(left_element + right_element) // 2 self.build(self.left(A_ ) , A_ , A_ , A_ ) self.build(self.right(A_ ) , mid + 1 , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> bool: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __UpperCamelCase =val if left_element != right_element: __UpperCamelCase =val __UpperCamelCase =val __UpperCamelCase =True __UpperCamelCase =True return True __UpperCamelCase =(left_element + right_element) // 2 self.update(self.left(A_ ) , A_ , A_ , A_ , A_ , A_ ) self.update(self.right(A_ ) , mid + 1 , A_ , A_ , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) return True def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int | float: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if 
right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __UpperCamelCase =(left_element + right_element) // 2 __UpperCamelCase =self.query(self.left(A_ ) , A_ , A_ , A_ , A_ ) __UpperCamelCase =self.query(self.right(A_ ) , mid + 1 , A_ , A_ , A_ ) return max(A_ , A_ ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A_ , A_ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _A = 15 _A = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
62
0
import os


def total_name_score(names: list[str]) -> int:
    """Return the Project Euler 22 total score for *names*.

    Names are sorted alphabetically; each name's letter values
    (A=1 ... Z=26) are summed and weighted by its 1-based position.
    """
    total_score = 0
    for position, name in enumerate(sorted(names), start=1):
        # ord("A") == 65, so ord(letter) - 64 maps A..Z to 1..26.
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += position * name_score
    return total_score


def lowerCamelCase__() -> int:
    """Read p022_names.txt next to this file and return the total score."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    # The file is a single line of quoted, comma-separated names.
    names = names.replace('"', "").split(",")
    return total_name_score(names)


if __name__ == "__main__":
    print(lowerCamelCase__())
12
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ): __UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250' __UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' ) __UpperCamelCase =soup.find_all('td' , attrs='titleColumn' ) __UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) } def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ): __UpperCamelCase =get_imdb_top_aaa_movies() with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file: __UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ ) writer.writerow(['Movie title', 'IMDb rating'] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
0
def _A(sentence: str, ngram_size: int) -> list[str]:
    """Return all contiguous character n-grams of *sentence*.

    Produces one slice per valid starting index; the result is empty when
    the sentence is shorter than *ngram_size*.

    >>> _A("abc", 2)
    ['ab', 'bc']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


# Export the underscore-prefixed helper explicitly for star imports.
__all__ = ["_A"]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
95
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A = logging.get_logger(__name__) _A = { 'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json', } class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip_vision_model" def __init__( self , A_=1408 , A_=6144 , A_=39 , A_=16 , A_=224 , A_=14 , A_="gelu" , A_=1E-6 , A_=0.0 , A_=1E-10 , A_=True , **A_ , ) -> Tuple: super().__init__(**A_ ) __UpperCamelCase =hidden_size __UpperCamelCase =intermediate_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =patch_size __UpperCamelCase =image_size __UpperCamelCase =initializer_range __UpperCamelCase =attention_dropout __UpperCamelCase =layer_norm_eps __UpperCamelCase =hidden_act __UpperCamelCase =qkv_bias @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "instructblip_qformer" def __init__( self , A_=30522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , A_=1E-12 , A_=0 , A_="absolute" , A_=2 , A_=1408 , **A_ , ) -> Optional[Any]: super().__init__(pad_token_id=A_ , **A_ ) __UpperCamelCase =vocab_size __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =hidden_act __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =max_position_embeddings __UpperCamelCase =initializer_range __UpperCamelCase =layer_norm_eps __UpperCamelCase =position_embedding_type __UpperCamelCase =cross_attention_frequency __UpperCamelCase =encoder_hidden_size @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip" UpperCAmelCase__ : Optional[Any] = True def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ) -> List[str]: super().__init__(**A_ ) if vision_config is None: __UpperCamelCase ={} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' 
) if qformer_config is None: __UpperCamelCase ={} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __UpperCamelCase ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __UpperCamelCase =InstructBlipVisionConfig(**A_ ) __UpperCamelCase =InstructBlipQFormerConfig(**A_ ) __UpperCamelCase =text_config['model_type'] if 'model_type' in text_config else 'opt' __UpperCamelCase =CONFIG_MAPPING[text_model_type](**A_ ) __UpperCamelCase =self.text_config.tie_word_embeddings __UpperCamelCase =self.text_config.is_encoder_decoder __UpperCamelCase =num_query_tokens __UpperCamelCase =self.vision_config.hidden_size __UpperCamelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __UpperCamelCase =1.0 __UpperCamelCase =0.02 @classmethod def _a ( cls , A_ , A_ , A_ , **A_ , ) -> Optional[Any]: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , ) def _a ( self ) -> Optional[Any]: __UpperCamelCase =copy.deepcopy(self.__dict__ ) __UpperCamelCase =self.vision_config.to_dict() __UpperCamelCase =self.qformer_config.to_dict() __UpperCamelCase =self.text_config.to_dict() __UpperCamelCase =self.__class__.model_type return output
62
0
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __A ( A_ , A_ , A_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = StableDiffusionControlNetImgaImgPipeline lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} ) lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) torch.manual_seed(0 ) lowerCamelCase__ = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', 
'''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) torch.manual_seed(0 ) lowerCamelCase__ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A_ , set_alpha_to_one=A_ , ) torch.manual_seed(0 ) lowerCamelCase__ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) lowerCamelCase__ = CLIPTextModel(A_ ) lowerCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase__ = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=0 ): '''simple docstring''' if str(A_ ).startswith('''mps''' ): lowerCamelCase__ = torch.manual_seed(A_ ) else: lowerCamelCase__ = torch.Generator(device=A_ ).manual_seed(A_ ) lowerCamelCase__ = 2 lowerCamelCase__ = randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=A_ , device=torch.device(A_ ) , ) lowerCamelCase__ = floats_tensor(control_image.shape , rng=random.Random(A_ ) ).to(A_ ) lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((6_4, 6_4) ) lowerCamelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, 
'''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __lowerCamelCase ( self ): '''simple docstring''' return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class __A ( A_ , A_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = StableDiffusionControlNetImgaImgPipeline lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) torch.manual_seed(0 ) def init_weights(__lowerCAmelCase ): if isinstance(A_ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCamelCase__ = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) controlneta.controlnet_down_blocks.apply(A_ ) torch.manual_seed(0 ) lowerCamelCase__ = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , 
down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) controlneta.controlnet_down_blocks.apply(A_ ) torch.manual_seed(0 ) lowerCamelCase__ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A_ , set_alpha_to_one=A_ , ) torch.manual_seed(0 ) lowerCamelCase__ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) lowerCamelCase__ = CLIPTextModel(A_ ) lowerCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase__ = MultiControlNetModel([controlneta, controlneta] ) lowerCamelCase__ = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=0 ): '''simple docstring''' if str(A_ ).startswith('''mps''' ): lowerCamelCase__ = torch.manual_seed(A_ ) else: lowerCamelCase__ = torch.Generator(device=A_ ).manual_seed(A_ ) lowerCamelCase__ = 2 lowerCamelCase__ = [ randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=A_ , device=torch.device(A_ ) , ), randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=A_ , device=torch.device(A_ ) , ), ] lowerCamelCase__ = floats_tensor(control_image[0].shape , 
rng=random.Random(A_ ) ).to(A_ ) lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((6_4, 6_4) ) lowerCamelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_dummy_components() lowerCamelCase__ = self.pipeline_class(**A_ ) pipe.to(A_ ) lowerCamelCase__ = 10.0 lowerCamelCase__ = 4 lowerCamelCase__ = self.get_dummy_inputs(A_ ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**A_ )[0] lowerCamelCase__ = self.get_dummy_inputs(A_ ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**A_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCamelCase__ = self.get_dummy_inputs(A_ ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**A_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCamelCase__ = self.get_dummy_inputs(A_ ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**A_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCamelCase ( self ): '''simple docstring''' return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def 
__lowerCamelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_dummy_components() lowerCamelCase__ = self.pipeline_class(**A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(A_ ) except NotImplementedError: pass @slow @require_torch_gpu class __A ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) lowerCamelCase__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=A_ , controlnet=A_ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A_ ) lowerCamelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ = '''evil space-punk bird''' lowerCamelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_1_2, 5_1_2) ) lowerCamelCase__ = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_1_2, 5_1_2) ) lowerCamelCase__ = pipe( A_ , A_ , control_image=A_ , generator=A_ , output_type='''np''' , num_inference_steps=5_0 , strength=0.6 , ) lowerCamelCase__ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) lowerCamelCase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9E-2
209
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated _A = collections.namedtuple('_Datasets', ['train', 'validation', 'test']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ _A = 'https://storage.googleapis.com/cvdf-datasets/mnist/' def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0] @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream: __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) if magic != 20_51: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =bytestream.read(rows * cols * num_images ) __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta ) __UpperCamelCase =data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) return data @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.one_hot on tensors.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): __UpperCamelCase =labels_dense.shape[0] __UpperCamelCase =numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes __UpperCamelCase =numpy.zeros((num_labels, num_classes) ) __UpperCamelCase =1 return labels_one_hot @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' 
) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : str=10 ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream: __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) if magic != 20_49: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =bytestream.read(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return labels class UpperCAmelCase__ : """simple docstring""" @deprecated( A_ , 'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' , ) def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ) -> Optional[int]: __UpperCamelCase , __UpperCamelCase =random_seed.get_seed(A_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __UpperCamelCase =dtypes.as_dtype(A_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: __UpperCamelCase =10000 __UpperCamelCase =one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'images.shape: {images.shape} labels.shape: {labels.shape}' __UpperCamelCase =images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __UpperCamelCase =images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__UpperCamelCase =images.astype(numpy.floataa ) __UpperCamelCase =numpy.multiply(A_ , 1.0 / 255.0 ) __UpperCamelCase =images __UpperCamelCase =labels __UpperCamelCase =0 __UpperCamelCase =0 @property def _a ( self ) -> Tuple: return self._images @property def _a ( self ) -> Union[str, Any]: return self._labels @property def _a ( self ) -> Optional[Any]: return self._num_examples @property def _a ( self ) -> List[str]: return self._epochs_completed def _a ( self , A_ , A_=False , A_=True ) -> Optional[Any]: if fake_data: __UpperCamelCase =[1] * 784 __UpperCamelCase =[1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(A_ )], [fake_label for _ in range(A_ )], ) __UpperCamelCase =self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __UpperCamelCase =numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) __UpperCamelCase =self.images[perma] __UpperCamelCase =self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __UpperCamelCase =self._num_examples - start __UpperCamelCase =self._images[start : self._num_examples] __UpperCamelCase =self._labels[start : self._num_examples] # Shuffle the data if shuffle: __UpperCamelCase =numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) __UpperCamelCase =self.images[perm] __UpperCamelCase =self.labels[perm] # Start next epoch __UpperCamelCase =0 __UpperCamelCase =batch_size - rest_num_examples __UpperCamelCase =self._index_in_epoch __UpperCamelCase =self._images[start:end] __UpperCamelCase =self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __UpperCamelCase =self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(SCREAMING_SNAKE_CASE__ , 'Please 
write your own downloading logic.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): if not gfile.Exists(SCREAMING_SNAKE_CASE__ ): gfile.MakeDirs(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not gfile.Exists(SCREAMING_SNAKE_CASE__ ): urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310 with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f: __UpperCamelCase =f.size() print('Successfully downloaded' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'bytes.' ) return filepath @deprecated( SCREAMING_SNAKE_CASE__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=50_00 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =fake() __UpperCamelCase =fake() __UpperCamelCase =fake() return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ ) if not source_url: # empty string check __UpperCamelCase =DEFAULT_SOURCE_URL __UpperCamelCase ='train-images-idx3-ubyte.gz' __UpperCamelCase ='train-labels-idx1-ubyte.gz' __UpperCamelCase ='t10k-images-idx3-ubyte.gz' __UpperCamelCase ='t10k-labels-idx1-ubyte.gz' __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ 
) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ ) if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =( 'Validation size should be between 0 and ' F'{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =train_images[:validation_size] __UpperCamelCase =train_labels[:validation_size] __UpperCamelCase =train_images[validation_size:] __UpperCamelCase =train_labels[validation_size:] __UpperCamelCase ={'dtype': dtype, 'reshape': reshape, 'seed': seed} __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
62
0
def combination_util(arr, n, r, index, data, i):
    """Recursively print all combinations of size ``r`` drawn from ``arr``.

    Args:
        arr: Input sequence to draw elements from.
        n: Number of elements of ``arr`` to consider.
        r: Size of each combination.
        index: Next free slot in ``data`` (current combination length).
        data: Scratch list of length ``r`` holding the combination being built.
        i: Index in ``arr`` of the next candidate element.
    """
    if index == r:
        # Combination complete: print it and backtrack.
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # No more elements are available to fill data[].
    if i >= n:
        return
    # Include arr[i] and recurse for the next slot.
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # Exclude arr[i]: advance i but keep the same slot index.
    combination_util(arr, n, r, index, data, i + 1)


def print_combination(arr, n, r):
    """Print all combinations of size ``r`` in ``arr`` of size ``n``.

    Thin wrapper that allocates the scratch buffer and starts the recursion.
    """
    data = [0] * r
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the functions above.
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
117
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class UpperCAmelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the word-level TransfoXL tokenizer.

    The obfuscated original named every method ``_a`` (so each definition
    shadowed the previous one and unittest discovered none) and used the
    undefined base class ``A_``; names are restored to the ones the
    ``TokenizerTesterMixin`` contract expects.
    """

    tokenizer_class = TransfoXLTokenizer  # class under test, used by the mixin
    test_rust_tokenizer = False  # TransfoXL ships no fast (Rust) tokenizer
    test_seq2seq = False

    def setUp(self):
        """Write a tiny word-level vocab file for the tests to load."""
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Return a lower-casing tokenizer loaded from the temp vocab."""
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input/expected-output pair used by the common round-trip tests."""
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses(self):
        # Moses-style punctuation splitting uses @-@ / @,@ / @.@ join markers.
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        # Detokenization must round-trip back to the original string.
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
62
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public symbols it exports.
# The obfuscated original rebound a throwaway name on every registration and
# then passed the undefined `_import_structure` to _LazyModule (NameError);
# restored to the standard transformers lazy-init pattern.
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

# Slow tokenizer needs sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

# Fast tokenizer needs the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

# Model classes need torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public symbols it exports.
# The obfuscated original rebound `_A` on every registration and then passed
# the undefined `_import_structure` to _LazyModule (NameError); restored to
# the standard transformers lazy-init pattern.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

# Fast tokenizer needs the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_convbert import (
        CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConvBertConfig,
        ConvBertOnnxConfig,
    )
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def _empty_device_cache():
    """Free cached allocator memory on whichever accelerator backend is active."""
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()


def release_memory(*objects):
    """Release ``objects`` by rebinding them to ``None`` and emptying device caches.

    Returns the list of ``None`` placeholders; callers should reassign their
    variables from it, e.g. ``a, b = release_memory(a, b)``.

    The obfuscated original defined this (and the two functions below) under
    the same name ``__lowerCamelCase``, so only the last survived; proper
    names are restored.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    _empty_device_cache()
    return objects


def should_reduce_batch_size(exception):
    """Return True if ``exception`` is an out-of-memory style RuntimeError.

    Matches CUDA OOM, a CUDNN capacity failure, and CPU allocator OOM by
    substring, mirroring the messages PyTorch raises.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries ``function``, halving its batch size on OOM.

    ``function`` must take ``batch_size`` as its first positional argument;
    the decorator supplies it, starting at ``starting_batch_size`` and
    halving after every OOM-style failure until success or zero.

    Args:
        function: The function to wrap (None when used as ``@find_executable_batch_size(...)``).
        starting_batch_size: Initial batch size to try.

    Raises:
        RuntimeError: If no batch size down to 1 succeeds.
        TypeError: If the caller also passes a batch size explicitly.
    """
    if function is None:
        # Called with arguments: return a decorator expecting the function.
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        _empty_device_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator injects batch_size itself.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    _empty_device_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
130
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


# NOTE(review): the obfuscated original named every function `_UpperCAmelCase`
# (so `main`, `accuracy`, `load_rocstories_dataset` and `pre_process_datasets`
# were never actually defined under the names they are called by) and used
# duplicate parameter names, which is a SyntaxError. Consistent names are
# restored from the visible call sites.


def accuracy(out, labels):
    """Number of multiple-choice predictions matching ``labels``."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Load a ROCStories CSV into tuples (story, cont1, cont2, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn encoded (story, cont1, cont2, label) tuples into model tensors.

    Each example becomes two alternatives of shape (2, input_len):
    [start] story [delimiter] continuation [clf], with -100 padding in
    lm_labels so padding is ignored by the LM loss.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # mc_token_ids marks the position of the [clf] token per alternative.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Fine-tune and/or evaluate OpenAI GPT (double heads) on ROCStories."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Seed everything for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize strings; pass ints through; map over containers."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Weighted sum of LM loss and multiple-choice loss.
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
62
0
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a__ : Dict =get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right a__ : List[str] =256_047 a__ : Optional[int] =256_145 @require_sentencepiece @require_tokenizers class snake_case ( A_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple =NllbTokenizer SCREAMING_SNAKE_CASE_ : Any =NllbTokenizerFast SCREAMING_SNAKE_CASE_ : str =True SCREAMING_SNAKE_CASE_ : Optional[Any] =True SCREAMING_SNAKE_CASE_ : Dict ={} def _lowerCamelCase ( self : int ): super().setUp() # We have a SentencePiece fixture for testing __UpperCamelCase = NllbTokenizer(A_ , keep_accents=A_ ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = NllbTokenizer(A_ , keep_accents=A_ ) __UpperCamelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(A_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) __UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ ) self.assertListEqual( A_ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) __UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def _lowerCamelCase ( self : Union[str, Any] ): __UpperCamelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) __UpperCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ ) __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = tokenizer_r.save_pretrained(A_ ) __UpperCamelCase = tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) __UpperCamelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(A_ , A_ ) # Checks everything loads correctly in the same way __UpperCamelCase = tokenizer_r.from_pretrained(A_ ) __UpperCamelCase = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and 
Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ , A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=True __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = tokenizer_r.save_pretrained(A_ , legacy_format=A_ ) __UpperCamelCase = tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files self.assertSequenceEqual(A_ , A_ ) # Checks everything loads correctly in the same way __UpperCamelCase = tokenizer_r.from_pretrained(A_ ) __UpperCamelCase = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ , A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=False __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = tokenizer_r.save_pretrained(A_ , legacy_format=A_ ) __UpperCamelCase = tokenizer_p.save_pretrained(A_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __UpperCamelCase = tokenizer_r.from_pretrained(A_ ) __UpperCamelCase = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ , A_ ) ) shutil.rmtree(A_ ) @require_torch def _lowerCamelCase ( self : Optional[Any] ): if not self.test_seqaseq: return __UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
__UpperCamelCase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for' ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons' ' will only worsen the violence and misery for millions of people.', ] __UpperCamelCase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al' ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] try: __UpperCamelCase = tokenizer.prepare_seqaseq_batch( src_texts=A_ , tgt_texts=A_ , max_length=3 , max_target_length=1_0 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 1_0 ) # max_target_length will default to max_length if not specified __UpperCamelCase = tokenizer.prepare_seqaseq_batch( A_ , tgt_texts=A_ , max_length=3 , return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) __UpperCamelCase = tokenizer.prepare_seqaseq_batch( src_texts=A_ , max_length=3 , max_target_length=1_0 , return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('decoder_input_ids' , A_ ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' 
) def _lowerCamelCase ( self : str ): pass def _lowerCamelCase ( self : Tuple ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCamelCase = [AddedToken('<special>' , lstrip=A_ )] __UpperCamelCase = self.rust_tokenizer_class.from_pretrained( A_ , additional_special_tokens=A_ , **A_ ) __UpperCamelCase = tokenizer_r.encode('Hey this is a <special> token' ) __UpperCamelCase = tokenizer_r.encode('<special>' , add_special_tokens=A_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: __UpperCamelCase = self.rust_tokenizer_class.from_pretrained( A_ , additional_special_tokens=A_ , **A_ , ) __UpperCamelCase = self.tokenizer_class.from_pretrained( A_ , additional_special_tokens=A_ , **A_ ) __UpperCamelCase = tokenizer_p.encode('Hey this is a <special> token' ) __UpperCamelCase = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(A_ , A_ ) self.assertEqual(A_ , A_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] ="facebook/nllb-200-distilled-600M" SCREAMING_SNAKE_CASE_ : int =[ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] SCREAMING_SNAKE_CASE_ : List[str] =[ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să 
înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] SCREAMING_SNAKE_CASE_ : Optional[Any] =[ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def _lowerCamelCase ( cls : Optional[int] ): __UpperCamelCase = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' ) __UpperCamelCase = 1 return cls def _lowerCamelCase ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 2_5_6_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 2_5_6_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 2_5_6_0_5_7 ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A_ ) def _lowerCamelCase ( self : Dict ): self.assertIn(A_ , self.tokenizer.all_special_ids ) # fmt: off __UpperCamelCase = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7] # fmt: on __UpperCamelCase = self.tokenizer.decode(A_ , skip_special_tokens=A_ ) __UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A_ ) self.assertEqual(A_ , A_ ) self.assertNotIn(self.tokenizer.eos_token , A_ ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = ['this is gunna be a long sentence ' * 2_0] assert isinstance(src_text[0] , A_ ) __UpperCamelCase = 1_0 __UpperCamelCase = self.tokenizer(A_ , max_length=A_ , truncation=A_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , A_ ) self.assertEqual(len(A_ ) , A_ ) def _lowerCamelCase ( self : List[Any] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_6_2_0_3, 3] ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = 
self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A_ ) __UpperCamelCase = NllbTokenizer.from_pretrained(A_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A_ ) @require_torch def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) __UpperCamelCase = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(A_ , A_ ) self.assertEqual((2, 1_5) , batch.input_ids.shape ) self.assertEqual((2, 1_5) , batch.attention_mask.shape ) __UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A_ ) self.assertEqual(A_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = self.tokenizer(self.src_text , padding=A_ , truncation=A_ , max_length=3 , return_tensors='pt' ) __UpperCamelCase = self.tokenizer( text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=1_0 , return_tensors='pt' ) __UpperCamelCase = targets['input_ids'] __UpperCamelCase = shift_tokens_right( A_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def _lowerCamelCase ( self : Union[str, Any] ): __UpperCamelCase = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(A_ ) , { # A, test, EOS, en_XX 'input_ids': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 
2_5_6_0_5_7, } , ) @require_torch def _lowerCamelCase ( self : Any ): __UpperCamelCase = True __UpperCamelCase = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] ) __UpperCamelCase = False __UpperCamelCase = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
53
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ): __UpperCamelCase =1 __UpperCamelCase =0 __UpperCamelCase =1 __UpperCamelCase =1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"""{solution() = }""")
62
0
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS

enable_full_determinism()


# NOTE(review): identifiers in this file look machine-renamed — every local is
# `SCREAMING_SNAKE_CASE__`, every method is `__a`, call sites reference an
# undefined name `A_`, and all three test classes share the name
# `__snake_case` (only the last class binding survives at module level).
# Code below is kept byte-identical; only comments/docstrings were added.
# TODO: restore the original identifiers before relying on these tests.
class __snake_case ( unittest.TestCase ):
    # Fast CPU unit tests for StableDiffusionLDMaDPipeline using tiny models.
    # NOTE(review): these four class attributes share one name, so only the
    # last assignment (TEXT_TO_IMAGE_IMAGE_PARAMS) is actually retained.
    lowerCAmelCase_ = StableDiffusionLDMaDPipeline
    lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
    lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS

    def __a ( self : List[Any] ):
        """Build tiny seeded dummy components (unet/scheduler/vae/text encoder/tokenizer)."""
        torch.manual_seed(0 )
        # 32x32, 4-latent-channel UNet — small enough to run on CPU.
        SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        SCREAMING_SNAKE_CASE__ = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=A_ , set_alpha_to_one=A_ , )
        torch.manual_seed(0 )
        # 6-channel VAE: RGB + depth concatenated, as in LDM3D.
        SCREAMING_SNAKE_CASE__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        SCREAMING_SNAKE_CASE__ = CLIPTextModel(A_ )
        SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        SCREAMING_SNAKE_CASE__ = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def __a ( self : int , _lowercase : Tuple , _lowercase : Optional[int]=0 ):
        """Build standard call kwargs with a device-appropriate seeded generator."""
        # mps does not support device-bound Generators — fall back to the global seed.
        if str(A_ ).startswith("""mps""" ):
            SCREAMING_SNAKE_CASE__ = torch.manual_seed(A_ )
        else:
            SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(A_ )
        SCREAMING_SNAKE_CASE__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def __a ( self : Tuple ):
        """Smoke-test DDIM sampling: check output shapes and pinned pixel slices."""
        SCREAMING_SNAKE_CASE__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ = StableDiffusionLDMaDPipeline(**A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe.to(A_ )
        ldmad_pipe.set_progress_bar_config(disable=A_ )
        SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe(**A_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output.rgb, output.depth
        SCREAMING_SNAKE_CASE__ = rgb[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE__ = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        # Regression values captured from a known-good run.
        SCREAMING_SNAKE_CASE__ = np.array(
            [0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] )
        SCREAMING_SNAKE_CASE__ = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2

    def __a ( self : Tuple ):
        """Check that passing prompt_embeds matches passing the equivalent prompts."""
        SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ = StableDiffusionLDMaDPipeline(**A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe.to(A_ )
        ldmad_pipe.set_progress_bar_config(disable=A_ )
        SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(A_ )
        SCREAMING_SNAKE_CASE__ = 3 * [inputs["""prompt"""]]
        # forward
        SCREAMING_SNAKE_CASE__ = ldmad_pipe(**A_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output.rgb, output.depth
        SCREAMING_SNAKE_CASE__ = rgb_slice_a[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE__ = depth_slice_a[0, -3:, -1]
        SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(A_ )
        SCREAMING_SNAKE_CASE__ = 3 * [inputs.pop("""prompt""" )]
        # Encode the same prompts by hand and feed the embeddings directly.
        SCREAMING_SNAKE_CASE__ = ldmad_pipe.tokenizer(
            A_ , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=A_ , return_tensors="""pt""" , )
        SCREAMING_SNAKE_CASE__ = text_inputs["""input_ids"""].to(A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe.text_encoder(A_ )[0]
        SCREAMING_SNAKE_CASE__ = prompt_embeds
        # forward
        SCREAMING_SNAKE_CASE__ = ldmad_pipe(**A_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output.rgb, output.depth
        SCREAMING_SNAKE_CASE__ = rgb_slice_a[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE__ = depth_slice_a[0, -3:, -1]
        # NOTE(review): both operands are the same collapsed name — originally
        # this compared slice_1 against slice_2; as written it is trivially true.
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4

    def __a ( self : Optional[Any] ):
        """Smoke-test PNDM sampling with a negative prompt; pin output slices."""
        SCREAMING_SNAKE_CASE__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ = PNDMScheduler(skip_prk_steps=A_ )
        SCREAMING_SNAKE_CASE__ = StableDiffusionLDMaDPipeline(**A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe.to(A_ )
        ldmad_pipe.set_progress_bar_config(disable=A_ )
        SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(A_ )
        SCREAMING_SNAKE_CASE__ = """french fries"""
        SCREAMING_SNAKE_CASE__ = ldmad_pipe(**A_ , negative_prompt=A_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output.rgb, output.depth
        SCREAMING_SNAKE_CASE__ = rgb[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE__ = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        SCREAMING_SNAKE_CASE__ = np.array(
            [0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] )
        SCREAMING_SNAKE_CASE__ = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2


@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    # Slow GPU integration tests against the released Intel/ldm3d checkpoint.

    def __a ( self : Optional[Any] ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self : Tuple , _lowercase : int , _lowercase : int="cpu" , _lowercase : int=torch.floataa , _lowercase : List[Any]=0 ):
        """Build fixed-latent inputs so outputs are reproducible across runs."""
        SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(A_ )
        SCREAMING_SNAKE_CASE__ = np.random.RandomState(A_ ).standard_normal((1, 4, 64, 64) )
        SCREAMING_SNAKE_CASE__ = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
        SCREAMING_SNAKE_CASE__ = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def __a ( self : List[Any] ):
        """Pin RGB/depth slices produced by the Intel/ldm3d checkpoint."""
        SCREAMING_SNAKE_CASE__ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe.to(A_ )
        ldmad_pipe.set_progress_bar_config(disable=A_ )
        SCREAMING_SNAKE_CASE__ = self.get_inputs(A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe(**A_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output.rgb, output.depth
        SCREAMING_SNAKE_CASE__ = rgb[0, -3:, -3:, -1].flatten()
        # NOTE(review): this slice reads `rgb`, not `depth` — almost certainly a
        # collapsed rename of `depth[0, -3:, -1]`; verify before trusting.
        SCREAMING_SNAKE_CASE__ = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12)
        SCREAMING_SNAKE_CASE__ = np.array(
            [0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
        SCREAMING_SNAKE_CASE__ = np.array(
            [0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3


@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    # Nightly GPU statistics tests (mean/std of full outputs) for both
    # the Intel/ldm3d and Intel/ldm3d-4c checkpoints.

    def __a ( self : Union[str, Any] ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self : Optional[int] , _lowercase : List[Any] , _lowercase : Tuple="cpu" , _lowercase : int=torch.floataa , _lowercase : int=0 ):
        """Build fixed-latent inputs (50 steps) for the nightly statistics checks."""
        SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(A_ )
        SCREAMING_SNAKE_CASE__ = np.random.RandomState(A_ ).standard_normal((1, 4, 64, 64) )
        SCREAMING_SNAKE_CASE__ = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
        SCREAMING_SNAKE_CASE__ = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 50,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def __a ( self : Optional[int] ):
        """Check output mean/std of Intel/ldm3d against pinned statistics."""
        SCREAMING_SNAKE_CASE__ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(A_ )
        ldmad_pipe.set_progress_bar_config(disable=A_ )
        SCREAMING_SNAKE_CASE__ = self.get_inputs(A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe(**A_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output.rgb, output.depth
        SCREAMING_SNAKE_CASE__ = 0.49_55_86
        SCREAMING_SNAKE_CASE__ = 0.33_79_55_15
        SCREAMING_SNAKE_CASE__ = 1_12.4_85_18
        SCREAMING_SNAKE_CASE__ = 98.48_97_46
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3

    def __a ( self : int ):
        """Check output shapes and mean/std of the 4-channel Intel/ldm3d-4c variant."""
        SCREAMING_SNAKE_CASE__ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(A_ )
        ldmad_pipe.set_progress_bar_config(disable=A_ )
        SCREAMING_SNAKE_CASE__ = self.get_inputs(A_ )
        SCREAMING_SNAKE_CASE__ = ldmad_pipe(**A_ )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output.rgb, output.depth
        SCREAMING_SNAKE_CASE__ = 0.4_19_41_27
        SCREAMING_SNAKE_CASE__ = 0.35_37_55_86
        SCREAMING_SNAKE_CASE__ = 0.5_63_85_02
        SCREAMING_SNAKE_CASE__ = 0.34_68_61_03
        # ldm3d-4c keeps depth as a trailing singleton channel.
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
219
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy import structure for the ViT-MAE model package.
# Backend-specific entries are appended below only when the backend is present.
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime the module is lazy.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
"""Open the top Google results for a command-line query in the browser."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fixed: was a garbled `bsa` import
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    # NOTE(review): upstream sends the key "UserAgent"; the standard request
    # header is "User-Agent" — confirm before changing, Google may not care.
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    # `.eZt8xd` is the CSS class Google currently uses for result links.
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            # Maps results carry an absolute URL already.
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
151
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure for the Jukebox model package; the modeling entries
# are appended only when torch is available.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime the module is lazy.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
"""Isolate the fractional part of a float, optionally rounded."""


def UpperCamelCase_(number: float, digit_amount: int) -> float:
    """Return the decimal (fractional) part of ``number``.

    :param number: value whose fractional part is wanted.
    :param digit_amount: number of decimal digits to round to; if 0 or
        negative, the raw fractional part is returned unrounded.
    :return: ``number - int(number)``, rounded to ``digit_amount`` digits
        when ``digit_amount > 0``. For negative inputs the result is
        negative (truncation toward zero, not floor).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(UpperCamelCase_(1.53, 0))
    print(UpperCamelCase_(35.345, 1))
    print(UpperCamelCase_(35.345, 2))
    print(UpperCamelCase_(35.345, 3))
    print(UpperCamelCase_(-14.789, 3))
    print(UpperCamelCase_(0, 2))
    print(UpperCamelCase_(-14.123, 1))
    print(UpperCamelCase_(-14.123, 2))
    print(UpperCamelCase_(-14.123, 3))
229
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Lazy import structure: this package only exposes the phoneme CTC tokenizer.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    # Static analyzers see the real import; at runtime the module is lazy.
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports the submodule on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
"""Transformers-Agents tool that captions an image with BLIP."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class A_(PipelineTool):
    """Tool that generates an English caption for an input image.

    Fixes vs. previous revision: the class inherited from itself (`A_`),
    and its six tool attributes / three hook methods all shared one name,
    so the PipelineTool contract (encode/forward/decode) was broken.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The BLIP processor needs Pillow; fail early if vision extras are missing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run caption generation on the preprocessed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated token ids into a single stripped caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
22
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class UpperCAmelCase__(Protocol):
    """Structural type for audio filters: anything with a per-sample process()."""

    # The functions below call `filter_type.process(...)`, so the protocol
    # method must be named `process` (it was misnamed, and the class
    # inherited an undefined base instead of Protocol).
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return (lowest, highest) dB bounds over the positive-frequency half,
    clamped to at least the [-20, 20] window.

    :param fft_results: magnitude spectrum in dB.
    :param samplerate: sampling rate used to locate the Nyquist bin.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: UpperCAmelCase__, samplerate: int) -> None:
    """Plot the filter's gain (dB) versus frequency on a log axis.

    Feeds a unit impulse through the filter and FFTs the zero-padded response.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    # Impulse response: run every input sample through the filter.
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def _UpperCAmelCase(filter_type: UpperCAmelCase__, samplerate: int) -> None:
    """Plot the filter's phase response (radians) versus frequency.

    Kept under its previous public name for backward compatibility; it was
    the surviving binding of three identically-named functions.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
62
0
def lowerCamelCase__() -> int:
    """Project Euler 40 — Champernowne's constant.

    Concatenate the positive integers ("123456789101112...") until the string
    is at least one million characters long, then multiply the digits at
    positions 1, 10, 100, ..., 1_000_000 (1-indexed).

    :return: the product d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000.
    """
    parts: list[str] = []
    i = 1
    # One million appended numbers yields well over 10**6 characters,
    # which is all the positions we need.
    while len(parts) < 1e6:
        parts.append(str(i))
        i += 1
    constant = "".join(parts)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(lowerCamelCase__())
12
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure for the SEW model package; modeling entries are
# appended only when torch is available.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime the module is lazy.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

# NOTE(review): identifiers in this file look machine-renamed — module-level
# constants all share the name `UpperCAmelCase` (later assignments clobber
# earlier ones), locals are all `a__`, call sites reference an undefined `A_`,
# the two helper functions share the name `_A` (the second shadows the first),
# and the class inherits from the undefined `A_` (presumably
# PreTrainedTokenizer — TODO confirm). Code is kept byte-identical; only
# comments/docstrings were added.
UpperCAmelCase : List[Any] = logging.get_logger(__name__)

# Filenames expected inside a saved tokenizer directory.
UpperCAmelCase : Tuple = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

# Hub download URLs per checkpoint.
UpperCAmelCase : Any = {
    """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
    """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
    """tokenizer_config_file""": {
        """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
    },
}

# Max model input length per checkpoint.
UpperCAmelCase : Optional[Any] = {"""facebook/blenderbot-3B""": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _A ( ):
    """Map utf-8 bytes to printable unicode characters for byte-level BPE.

    NOTE(review): the working variables (`bs`, `cs`, `n`) were collapsed to
    `a__` placeholders, so as written this raises NameError — the structure
    matches RoBERTa's bytes_to_unicode.
    """
    a__ : Optional[int] =(
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    a__ : Tuple =bs[:]
    a__ : List[str] =0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(SCREAMING_SNAKE_CASE__ )
            cs.append(2**8 + n )
            n += 1
    a__ : List[str] =[chr(SCREAMING_SNAKE_CASE__ ) for n in cs]
    return dict(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )


def _A ( SCREAMING_SNAKE_CASE : Tuple ):
    """Return the set of adjacent symbol pairs in a word (get_pairs).

    NOTE(review): this definition shadows the bytes_to_unicode `_A` above.
    """
    a__ : List[str] =set()
    a__ : int =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        a__ : List[str] =char
    return pairs


class __lowerCAmelCase ( A_):
    # Byte-level BPE tokenizer for Blenderbot (GPT-2/RoBERTa style).
    # NOTE(review): the four class attributes share the name `_lowercase`,
    # so only the last (the model input names list) survives.
    _lowercase : List[str] = VOCAB_FILES_NAMES
    _lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : Dict = ["input_ids", "attention_mask"]

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Union[str, Any]:
        """Load vocab/merges files and build encoder, decoder and BPE rank tables."""
        # Wrap bare string special tokens into AddedToken instances.
        a__ : Dict =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
        a__ : Tuple =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
        a__ : Optional[int] =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
        a__ : List[str] =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
        a__ : Optional[int] =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
        a__ : List[str] =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        a__ : Dict =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
        super().__init__(
            errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
        with open(A_ , encoding="utf-8" ) as vocab_handle:
            a__ : int =json.load(A_ )
        a__ : Union[str, Any] ={v: k for k, v in self.encoder.items()}
        a__ : List[Any] =errors  # how to handle errors in decoding
        a__ : List[str] =bytes_to_unicode()
        a__ : List[Any] ={v: k for k, v in self.byte_encoder.items()}
        with open(A_ , encoding="utf-8" ) as merges_handle:
            a__ : Dict =merges_handle.read().split("\n" )[1:-1]
        a__ : Tuple =[tuple(merge.split() ) for merge in bpe_merges]
        a__ : Any =dict(zip(A_ , range(len(A_ ) ) ) )
        a__ : Any ={}
        a__ : List[Any] =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        a__ : int =re.compile(r"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _lowercase ( self ) -> Union[str, Any]:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder )

    def _lowercase ( self ) -> Union[str, Any]:
        """Full vocab mapping, including tokens added after loading."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _lowercase ( self , lowerCAmelCase__ ) -> List[str]:
        """Apply (cached) byte-pair merges to one pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        a__ : Dict =tuple(A_ )
        a__ : Optional[Any] =get_pairs(A_ )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            a__ : Union[str, Any] =min(A_ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(A_ , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            a__ , a__ : Union[str, Any] =bigram
            a__ : int =[]
            a__ : Union[str, Any] =0
            while i < len(A_ ):
                try:
                    a__ : Optional[int] =word.index(A_ , A_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    a__ : List[str] =j
                if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            a__ : Optional[Any] =tuple(A_ )
            a__ : Tuple =new_word
            if len(A_ ) == 1:
                break
            else:
                a__ : List[Any] =get_pairs(A_ )
        a__ : List[Any] =" ".join(A_ )
        a__ : int =word
        return word

    def _lowercase ( self , lowerCAmelCase__ ) -> Tuple:
        """Split text with the pre-tokenizer regex, byte-encode, then BPE each piece."""
        a__ : List[Any] =[]
        for token in re.findall(self.pat , A_ ):
            a__ : Union[str, Any] ="".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(" " ) )
        return bpe_tokens

    def _lowercase ( self , lowerCAmelCase__ ) -> Dict:
        """Token string -> id (falls back to the unk token's id)."""
        return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )

    def _lowercase ( self , lowerCAmelCase__ ) -> Optional[Any]:
        """Id -> token string."""
        return self.decoder.get(A_ )

    def _lowercase ( self , lowerCAmelCase__ ) -> int:
        """Join tokens and reverse the byte-level encoding back to text."""
        a__ : Dict ="".join(A_ )
        a__ : Optional[Any] =bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text

    def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(A_ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        a__ : Tuple =os.path.join(
            A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        a__ : Tuple =os.path.join(
            A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(A_ , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + "\n" )
        a__ : Tuple =0
        with open(A_ , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    a__ : Any =token_index
                writer.write(" ".join(A_ ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
        """Mark special-token positions with 1 and sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
        if token_ids_a is None:
            return [1] + ([0] * len(A_ )) + [1]
        return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]

    def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        """Token-type ids — all zeros, as Blenderbot does not use segment ids."""
        a__ : Optional[int] =[self.sep_token_id]
        a__ : Union[str, Any] =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Tuple:
        """Optionally prepend a space so the first word BPE-merges like mid-sentence words."""
        a__ : str =kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
            a__ : Tuple =" " + text
        return (text, kwargs)

    def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple:
        """Append EOS; Blenderbot inputs are a single sequence ending in </s>."""
        return token_ids_a + [self.eos_token_id]

    def _lowercase ( self , lowerCAmelCase__ ) -> List[int]:
        """Flatten a Conversation into ids, truncating from the left if too long."""
        a__ : str =[]
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(A_ )
        a__ : Optional[int] =" ".join(A_ )
        a__ : str =self.encode(A_ )
        if len(A_ ) > self.model_max_length:
            # Keep the most recent tokens of the conversation.
            a__ : str =input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
95
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "mvp" UpperCAmelCase__ : Tuple = ["past_key_values"] UpperCAmelCase__ : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , A_=50267 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , A_=False , A_=100 , A_=800 , **A_ , ) -> Union[str, Any]: __UpperCamelCase =vocab_size __UpperCamelCase =max_position_embeddings __UpperCamelCase =d_model __UpperCamelCase =encoder_ffn_dim __UpperCamelCase =encoder_layers __UpperCamelCase =encoder_attention_heads __UpperCamelCase =decoder_ffn_dim __UpperCamelCase =decoder_layers __UpperCamelCase =decoder_attention_heads __UpperCamelCase =dropout __UpperCamelCase =attention_dropout __UpperCamelCase =activation_dropout __UpperCamelCase =activation_function __UpperCamelCase =init_std __UpperCamelCase =encoder_layerdrop __UpperCamelCase =decoder_layerdrop __UpperCamelCase =classifier_dropout __UpperCamelCase =use_cache __UpperCamelCase =encoder_layers __UpperCamelCase =scale_embedding # scale factor will be sqrt(d_model) if True __UpperCamelCase =use_prompt __UpperCamelCase =prompt_length __UpperCamelCase =prompt_mid_dim super().__init__( pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , A_ ): __UpperCamelCase =self.bos_token_id warnings.warn( f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future 
versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
62
0
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) _a = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class __A ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None ): '''simple docstring''' lowerCamelCase__ = None lowerCamelCase__ = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowerCamelCase__ = os.path.abspath('''examples''' ) for item in os.listdir(A_ ): if item not in EXCLUDE_EXAMPLES: lowerCamelCase__ = os.path.join(A_ , A_ ) if os.path.isfile(A_ ) and ".py" in item_path: with self.subTest( tested_script=A_ , feature_script=A_ , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowerCamelCase__ = compare_against_test( os.path.join(A_ , A_ ) , A_ , A_ , A_ ) lowerCamelCase__ = '''\n'''.join(A_ ) if special_strings is not None: for string in special_strings: lowerCamelCase__ = diff.replace(A_ , '''''' ) self.assertEqual(A_ , '''''' ) def __lowerCamelCase ( self ): '''simple docstring''' self.one_complete_example('''complete_nlp_example.py''' , A_ ) self.one_complete_example('''complete_nlp_example.py''' , A_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) 
lowerCamelCase__ = [ ''' ''' * 1_6 + '''{\n\n''', ''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 2_0 + '''"epoch": epoch,\n\n''', ''' ''' * 1_6 + '''},\n\n''', ''' ''' * 1_6 + '''step=epoch,\n''', ''' ''' * 1_2, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , A_ , A_ , A_ ) self.one_complete_example('''complete_cv_example.py''' , A_ , A_ , A_ ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class __A ( A_ ): '''simple docstring''' lowerCAmelCase_ = False @classmethod def __lowerCamelCase ( cls ): '''simple docstring''' super().setUpClass() lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCamelCase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def __lowerCamelCase ( cls ): '''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split() lowerCamelCase__ = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" 
)}\n '.split() lowerCamelCase__ = run_command(self._launch_args + testargs , return_stdout=A_ ) self.assertNotIn('''epoch 0:''' , A_ ) self.assertIn('''epoch 1:''' , A_ ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split() lowerCamelCase__ = run_command(self._launch_args + testargs , return_stdout=A_ ) if torch.cuda.is_available(): lowerCamelCase__ = torch.cuda.device_count() else: lowerCamelCase__ = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , A_ ) self.assertIn('''epoch 1:''' , A_ ) else: self.assertIn('''epoch 0:''' , A_ ) self.assertIn('''epoch 1:''' , A_ ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = '''\n examples/by_feature/cross_validation.py\n --num_folds 2\n '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowerCamelCase__ = run_command(self._launch_args + testargs , return_stdout=A_ ) lowerCamelCase__ = re.findall('''({.+})''' , A_ ) lowerCamelCase__ = [r for r in results if '''accuracy''' in r][-1] lowerCamelCase__ = ast.literal_eval(A_ ) self.assertGreaterEqual(results['''accuracy'''] , 0.75 ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __lowerCamelCase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: lowerCamelCase__ = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(A_ , '''tracking''' ) ) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs 
) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
209
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = GPTaTokenizer UpperCAmelCase__ : Any = GPTaTokenizerFast UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : int = {"add_prefix_space": True} UpperCAmelCase__ : Any = False def _a ( self ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase =[ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) ) __UpperCamelCase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase ={'unk_token': '<unk>'} __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _a ( self , **A_ ) -> str: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , **A_ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase ='lower newer' __UpperCamelCase ='lower newer' return input_text, output_text def _a ( self ) -> List[Any]: __UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) 
__UpperCamelCase ='lower newer' __UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) self.assertListEqual(A_ , A_ ) __UpperCamelCase =tokens + [tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self ) -> int: if not self.test_rust_tokenizer: return __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase ='lower newer' # Testing tokenization __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids without special tokens __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids with special tokens __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # Testing the unknown token __UpperCamelCase =tokens + [rust_tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self , *A_ , **A_ ) -> Optional[int]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def _a ( self , A_=15 ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This 
is a simple input 2'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) def _a ( self ) -> int: __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __UpperCamelCase =tokenizer.pad_token_id __UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) __UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding 
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase ='$$$' __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ ) __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This is a simple input 2'] __UpperCamelCase =tokenizer.bos_token_id __UpperCamelCase =tokenizer(A_ ) __UpperCamelCase =tokenizer(A_ ) self.assertEqual(out_s.input_ids[0] , A_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __UpperCamelCase =tokenizer.decode(out_s.input_ids ) __UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def _a ( self ) -> Optional[int]: pass def _a ( self ) -> Any: # TODO: change to self.get_tokenizers() when the fast version is implemented __UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): __UpperCamelCase ='Encode this.' 
__UpperCamelCase ='This one too please.' __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ ) encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ ) __UpperCamelCase =tokenizer.encode_plus( A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , ) __UpperCamelCase =encoded_sequence_dict['input_ids'] __UpperCamelCase =encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(A_ ) , len(A_ ) ) __UpperCamelCase =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ ) ] __UpperCamelCase =[x for x in filtered_sequence if x is not None] self.assertEqual(A_ , A_ ) @require_tokenizers class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> Optional[Any]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('test_opt' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) def _a ( self ) -> Dict: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # Same as above self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def _a ( self ) -> List[Any]: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='bos' __UpperCamelCase =tokenizer.get_vocab()['bos'] __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # We 
changed the bos token self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('./tok' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
62
0
import math def _a ( lowerCamelCase: int ) -> List[str]: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __A = F"""Input value of [number={number}] must be an integer""" raise TypeError(SCREAMING_SNAKE_CASE__ ) if number < 1: __A = F"""Input value of [number={number}] must be > 0""" raise ValueError(SCREAMING_SNAKE_CASE__ ) elif number == 1: return 3 elif number == 2: return 5 else: __A = int(math.log(number // 3 , 2 ) ) + 2 __A = [3, 5] __A = 2 __A = 3 for block in range(1 , SCREAMING_SNAKE_CASE__ ): for _ in range(SCREAMING_SNAKE_CASE__ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): snake_case__ : List[Any] = 0 try: snake_case__ : List[Any] = proth(number) except ValueError: print(f'ValueError: there is no {number}th Proth number') continue print(f'The {number}th Proth number: {value}')
117
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ = None ) -> None: if components is None: __UpperCamelCase =[] __UpperCamelCase =list(A_ ) def __len__( self ) -> int: return len(self.__components ) def __str__( self ) -> str: return "(" + ",".join(map(A_ , self.__components ) ) + ")" def __add__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: raise Exception('must have the same size' ) def __sub__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self , A_ ) -> Vector: ... @overload def __mul__( self , A_ ) -> float: ... def __mul__( self , A_ ) -> float | Vector: if isinstance(A_ , (float, int) ): __UpperCamelCase =[c * other for c in self.__components] return Vector(A_ ) elif isinstance(A_ , A_ ) and len(self ) == len(A_ ): __UpperCamelCase =len(self ) __UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )] return sum(A_ ) else: # error case raise Exception('invalid operand!' 
) def _a ( self ) -> Vector: return Vector(self.__components ) def _a ( self , A_ ) -> float: if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def _a ( self , A_ , A_ ) -> None: assert -len(self.__components ) <= pos < len(self.__components ) __UpperCamelCase =value def _a ( self ) -> float: if len(self.__components ) == 0: raise Exception('Vector is empty' ) __UpperCamelCase =[c**2 for c in self.__components] return math.sqrt(sum(A_ ) ) def _a ( self , A_ , A_ = False ) -> float: __UpperCamelCase =self * other __UpperCamelCase =self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return Vector([0] * dimension ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) __UpperCamelCase =[0] * dimension __UpperCamelCase =1 return Vector(SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ): assert ( isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) )) ) return x * scalar + y def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] return Vector(SCREAMING_SNAKE_CASE__ ) class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ , A_ , A_ ) -> None: 
__UpperCamelCase =matrix __UpperCamelCase =w __UpperCamelCase =h def __str__( self ) -> str: __UpperCamelCase ='' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] + other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrix must have the same dimension!' ) def __sub__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] - other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self , A_ ) -> Matrix: ... @overload def __mul__( self , A_ ) -> Vector: ... def __mul__( self , A_ ) -> Vector | Matrix: if isinstance(A_ , A_ ): # matrix-vector if len(A_ ) == self.__width: __UpperCamelCase =zero_vector(self.__height ) for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] * other.component(A_ ) for j in range(self.__width ) ] ans.change_component(A_ , sum(A_ ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' 
) elif isinstance(A_ , (int, float) ): # matrix-scalar __UpperCamelCase =[ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(A_ , self.__width , self.__height ) return None def _a ( self ) -> int: return self.__height def _a ( self ) -> int: return self.__width def _a ( self , A_ , A_ ) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ , A_ ) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: __UpperCamelCase =value else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) __UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(A_ ) ): __UpperCamelCase =minor[i][:y] + minor[i][y + 1 :] return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant() def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(A_ , A_ ) else: raise Exception('Indices out of bounds' ) def _a ( self ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __UpperCamelCase =[ self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width ) ] return sum(A_ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , 
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[ [random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ ) ] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
62
0
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class lowerCamelCase_ : '''simple docstring''' a__ : str = field( metadata={"""help""": """The output directory where the model will be written."""} , ) a__ : str = field( metadata={ """help""": ( """The encoder model checkpoint for weights initialization.""" """Don't set if you want to train an encoder model from scratch.""" ) } , ) a__ : str = field( metadata={ """help""": ( """The decoder model checkpoint for weights initialization.""" """Don't set if you want to train a decoder model from scratch.""" ) } , ) a__ : Optional[str] = field( default=A_ , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} ) a__ : Optional[str] = field( default=A_ , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} ) def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :int = HfArgumentParser((ModelArguments,) ) ((__UpperCamelCase ) , ) :Any = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: __UpperCamelCase :str = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: __UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: __UpperCamelCase :Dict = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: __UpperCamelCase :str = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed __UpperCamelCase :Tuple = True __UpperCamelCase :Any = True __UpperCamelCase 
:Union[str, Any] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=SCREAMING_SNAKE_CASE__ , decoder_config=SCREAMING_SNAKE_CASE__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens __UpperCamelCase :Dict = decoder_config.decoder_start_token_id __UpperCamelCase :Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: __UpperCamelCase :Optional[int] = decoder_config.bos_token_id if pad_token_id is None: __UpperCamelCase :Tuple = decoder_config.eos_token_id # This is necessary to make Flax's generate() work __UpperCamelCase :Union[str, Any] = decoder_config.eos_token_id __UpperCamelCase :Optional[int] = decoder_start_token_id __UpperCamelCase :Optional[Any] = pad_token_id __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) __UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) __UpperCamelCase :Optional[int] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
43
_A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} _A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ): __UpperCamelCase =True __UpperCamelCase =[] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) order.append(SCREAMING_SNAKE_CASE__ ) return order def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ): __UpperCamelCase =True __UpperCamelCase =[vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return component def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] ): __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False] __UpperCamelCase ={vert: [] for vert in range(len(SCREAMING_SNAKE_CASE__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[] for i, was_visited in enumerate(SCREAMING_SNAKE_CASE__ ): if not was_visited: order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[] __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): __UpperCamelCase =order[len(SCREAMING_SNAKE_CASE__ ) - i - 1] if not visited[vert]: __UpperCamelCase =find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) components_list.append(SCREAMING_SNAKE_CASE__ ) return components_list
62
0
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if not len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) == 3: raise ValueError("Please enter a valid equation." ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("Both a & b of two equations can\'t be zero." ) # Extract the coefficients lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = equationa lowercase__ , lowercase__ , lowercase__ : int = equationa # Calculate the determinants of the matrices lowercase__ : int = aa * ba - aa * ba lowercase__ : str = ca * ba - ca * ba lowercase__ : Union[str, Any] = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("Infinite solutions. (Consistent system)" ) else: raise ValueError("No solution. (Inconsistent system)" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: lowercase__ : Any = determinant_x / determinant lowercase__ : Tuple = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
130
"""Tokenization classes for BARTpho-syllable."""

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BARTpho.

    A reduced "monolingual" fairseq vocabulary (``dict.txt``) is mapped on top
    of the multilingual SentencePiece model: only tokens present in the
    monolingual file (plus the special tokens) receive ids.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab. Keep order of special tokens for backward compatibility.
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self) -> Any:
        # The SentencePieceProcessor is not picklable: serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token type ids, so this returns a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to an id; unknown tokens map to the unk id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        return self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id back to its token using the reduced vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-tokens and replace the SentencePiece underline with spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model and the monolingual dict into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
62
0
"""Project Euler problem 8: largest product of 13 adjacent digits in a 1000-digit number."""
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of 13 consecutive digits in the digit string `n`."""
    # Start below any achievable product so the first window always wins.
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
53
from numpy import exp, pi, sqrt def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
62
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Modules that can be imported without any optional dependency.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Defer all real imports until attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
219
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Image processor for CLIP: resize (shortest edge), center crop, rescale, normalize, RGB-convert."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Apply the configured pipeline to one image or a batch and return a `BatchFeature`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
62
0
"""Check (and optionally fix) that `# Copied from diffusers....` blocks match their source."""

import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A definition continues while lines stay indented, are blank, or close a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers source tree."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply black formatting (repo settings) to `code`, tolerating indented snippets."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        # Wrap in a dummy class so black accepts method-level indentation.
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """Check all `# Copied from` blocks in `filename`; return [object_name, line] for each mismatch.

    With `overwrite=True`, rewrite the file in place with the up-to-date copies.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite=False):
    """Run `is_copy_consistent` on every Python file under DIFFUSERS_PATH and raise on mismatches."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
151
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration for the YOLOS object-detection model (ViT backbone + detection tokens)."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image / patch embedding.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
62
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Modules that can be imported without any optional dependency.
_import_structure = {
    "configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch.
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # Defer all real imports until attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
229
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Modules that can be imported without any optional dependency.
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processing requires vision extras (PIL).
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch.
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    # Defer all real imports until attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# Silence TensorFlow's C++ logging before it is imported below.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
22
from __future__ import annotations import math class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ ) -> None: __UpperCamelCase =size # approximate the overall size of segment tree with given value __UpperCamelCase =[0 for i in range(0 , 4 * size )] # create array to store lazy update __UpperCamelCase =[0 for i in range(0 , 4 * size )] __UpperCamelCase =[0 for i in range(0 , 4 * size )] # flag for lazy update def _a ( self , A_ ) -> int: return idx * 2 def _a ( self , A_ ) -> int: return idx * 2 + 1 def _a ( self , A_ , A_ , A_ , A_ ) -> None: if left_element == right_element: __UpperCamelCase =a[left_element - 1] else: __UpperCamelCase =(left_element + right_element) // 2 self.build(self.left(A_ ) , A_ , A_ , A_ ) self.build(self.right(A_ ) , mid + 1 , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> bool: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __UpperCamelCase =val if left_element != right_element: __UpperCamelCase =val __UpperCamelCase =val __UpperCamelCase =True __UpperCamelCase =True return True __UpperCamelCase =(left_element + right_element) // 2 self.update(self.left(A_ ) , A_ , A_ , A_ , A_ , A_ ) self.update(self.right(A_ ) , mid + 1 , A_ , A_ , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) return True def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int | float: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if 
right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __UpperCamelCase =(left_element + right_element) // 2 __UpperCamelCase =self.query(self.left(A_ ) , A_ , A_ , A_ , A_ ) __UpperCamelCase =self.query(self.right(A_ ) , mid + 1 , A_ , A_ , A_ ) return max(A_ , A_ ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A_ , A_ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _A = 15 _A = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
62
0
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``.

    Bug fix: the obfuscated source named all three functions
    ``lowerCamelCase__`` and recursed with a single undefined placeholder for
    every pole argument; the classic Hanoi argument rotation is restored.
    """
    if height >= 1:
        # park height-1 disks on the spare pole, move the base, re-stack
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print("moving disk from", fp, "to", tp)


def main() -> None:
    """Read the tower height from stdin and print the full solution."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
12
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ): __UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250' __UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' ) __UpperCamelCase =soup.find_all('td' , attrs='titleColumn' ) __UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) } def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ): __UpperCamelCase =get_imdb_top_aaa_movies() with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file: __UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ ) writer.writerow(['Movie title', 'IMDb rating'] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
0
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" a__ : Dict =tmp_path / "cache" a__ : Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a__ : Union[str, Any] =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" a__ : Optional[Any] =tmp_path / "cache" a__ : Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} a__ : Optional[Any] =features.copy() if features else default_expected_features a__ : Any =( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if 
features is not None else None ) a__ : Optional[int] =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" a__ : int =tmp_path / "cache" a__ : Tuple ={"col_3": "float64", "col_1": "string", "col_2": "int64"} a__ : Any =features.copy() if features else default_expected_features a__ : int =( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) a__ : Dict =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ): """simple docstring""" a__ : str ={"col_2": "int64", "col_3": "float64", "col_1": "string"} a__ : Union[str, Any] =features.copy() a__ : Any =( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) a__ : str =tmp_path / "cache" a__ : int =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert 
dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ): """simple docstring""" a__ : List[Any] =tmp_path / "cache" a__ : Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} a__ : Optional[Any] =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read() _check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ): """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a__ : Optional[int] =jsonl_path elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a__ : Tuple =[jsonl_path] a__ : Union[str, Any] =tmp_path / "cache" a__ : Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} a__ : Optional[int] =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _A ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any]=("train",) ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for split in splits: a__ : Union[str, Any] =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ): 
"""simple docstring""" a__ : str =tmp_path / "cache" a__ : Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a__ : str =JsonDatasetReader({"train": jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ): """simple docstring""" a__ : Tuple =tmp_path / "cache" a__ : Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} a__ : str =features.copy() if features else default_expected_features a__ : Tuple =( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) a__ : Dict =JsonDatasetReader({"train": jsonl_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" if split: a__ : List[Any] ={split: jsonl_path} else: a__ : Union[str, Any] ="train" a__ : int ={"train": jsonl_path, "test": jsonl_path} a__ : Dict =tmp_path / "cache" a__ : List[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} a__ : Tuple =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_json_datasetdict(SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" return json.load(SCREAMING_SNAKE_CASE__ ) def _A ( SCREAMING_SNAKE_CASE : Any ): """simple docstring""" return [json.loads(SCREAMING_SNAKE_CASE__ ) for line in buffer] class __lowerCAmelCase : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(A_ , A_ , lines=A_ ).write() buffer.seek(0 ) a__ : Any =load_json_function(A_ ) assert isinstance(A_ , A_ ) assert isinstance(exported_content[0] , A_ ) assert len(A_ ) == 1_0 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(A_ , A_ , lines=A_ , orient=A_ ).write() buffer.seek(0 ) a__ : Any =load_json(A_ ) assert isinstance(A_ , A_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(A_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(A_ ) == 1_0 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> 
List[Any]: '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(A_ , A_ , lines=A_ , num_proc=2 ).write() buffer.seek(0 ) a__ : Tuple =load_json_function(A_ ) assert isinstance(A_ , A_ ) assert isinstance(exported_content[0] , A_ ) assert len(A_ ) == 1_0 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(A_ , A_ , lines=A_ , orient=A_ , num_proc=2 ).write() buffer.seek(0 ) a__ : Optional[Any] =load_json(A_ ) assert isinstance(A_ , A_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(A_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(A_ ) == 1_0 def _lowercase ( self , lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' with pytest.raises(A_ ): with io.BytesIO() as buffer: JsonDatasetWriter(A_ , A_ , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' a__ : Optional[Any] =tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' a__ : Any =str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(A_ , A_ , compression=A_ ).write() with fsspec.open(A_ , "rb" , compression="infer" ) as f: a__ : Dict =f.read() with fsspec.open(A_ , 
"rb" , compression="infer" ) as f: a__ : str =f.read() assert exported_content == original_content
95
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A = logging.get_logger(__name__) _A = { 'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json', } class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip_vision_model" def __init__( self , A_=1408 , A_=6144 , A_=39 , A_=16 , A_=224 , A_=14 , A_="gelu" , A_=1E-6 , A_=0.0 , A_=1E-10 , A_=True , **A_ , ) -> Tuple: super().__init__(**A_ ) __UpperCamelCase =hidden_size __UpperCamelCase =intermediate_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =patch_size __UpperCamelCase =image_size __UpperCamelCase =initializer_range __UpperCamelCase =attention_dropout __UpperCamelCase =layer_norm_eps __UpperCamelCase =hidden_act __UpperCamelCase =qkv_bias @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "instructblip_qformer" def __init__( self , A_=30522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , A_=1E-12 , A_=0 , A_="absolute" , A_=2 , A_=1408 , **A_ , ) -> Optional[Any]: super().__init__(pad_token_id=A_ , **A_ ) __UpperCamelCase =vocab_size __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =hidden_act __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =max_position_embeddings __UpperCamelCase =initializer_range __UpperCamelCase =layer_norm_eps __UpperCamelCase =position_embedding_type __UpperCamelCase =cross_attention_frequency __UpperCamelCase =encoder_hidden_size @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip" UpperCAmelCase__ : Optional[Any] = True def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ) -> List[str]: super().__init__(**A_ ) if vision_config is None: __UpperCamelCase ={} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' 
) if qformer_config is None: __UpperCamelCase ={} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __UpperCamelCase ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __UpperCamelCase =InstructBlipVisionConfig(**A_ ) __UpperCamelCase =InstructBlipQFormerConfig(**A_ ) __UpperCamelCase =text_config['model_type'] if 'model_type' in text_config else 'opt' __UpperCamelCase =CONFIG_MAPPING[text_model_type](**A_ ) __UpperCamelCase =self.text_config.tie_word_embeddings __UpperCamelCase =self.text_config.is_encoder_decoder __UpperCamelCase =num_query_tokens __UpperCamelCase =self.vision_config.hidden_size __UpperCamelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __UpperCamelCase =1.0 __UpperCamelCase =0.02 @classmethod def _a ( cls , A_ , A_ , A_ , **A_ , ) -> Optional[Any]: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , ) def _a ( self ) -> Optional[Any]: __UpperCamelCase =copy.deepcopy(self.__dict__ ) __UpperCamelCase =self.vision_config.to_dict() __UpperCamelCase =self.qformer_config.to_dict() __UpperCamelCase =self.text_config.to_dict() __UpperCamelCase =self.__class__.model_type return output
62
0
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging _a = logging.get_logger(__name__) logging.set_verbosity_info() def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[Any]: '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: lowerCamelCase__ = XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ , lowerCamelCase__ = XLMProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ) else: lowerCamelCase__ = ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ , lowerCamelCase__ = ProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ = ['''key_proj''', '''value_proj''', '''query_proj'''] lowerCamelCase__ = { '''self_attn''': '''ngram_self_attn''', '''cross_attn''': '''encoder_attn''', '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''', '''feed_forward_layer_norm''': '''final_layer_norm''', '''feed_forward''': '''''', '''intermediate''': '''fc1''', '''output''': '''fc2''', '''key_proj''': '''k_proj''', '''query_proj''': '''q_proj''', '''value_proj''': '''v_proj''', '''word_embeddings''': '''embed_tokens''', '''embeddings_layer_norm''': '''emb_layer_norm''', '''relative_pos_embeddings''': '''relative_linear''', '''ngram_embeddings''': '''ngram_input_embed''', 
'''position_embeddings''': '''embed_positions''', } for key in loading_info["missing_keys"]: lowerCamelCase__ = key.split('''.''' ) if attributes[0] == "lm_head": lowerCamelCase__ = prophet lowerCamelCase__ = prophet_old else: lowerCamelCase__ = prophet.prophetnet lowerCamelCase__ = prophet_old.model lowerCamelCase__ = False for attribute in attributes: if attribute in mapping: lowerCamelCase__ = mapping[attribute] if not hasattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) > 0: lowerCamelCase__ = attribute elif hasattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ): lowerCamelCase__ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowerCamelCase__ = old_model.weight logger.info(F'{attribute} is initialized.' ) lowerCamelCase__ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" lowerCamelCase__ = old_model.bias logger.info(F'{attribute} is initialized' ) lowerCamelCase__ = True break elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE__ ,'''in_proj_weight''' ): lowerCamelCase__ = old_model.in_proj_weight.shape[0] // 3 lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowerCamelCase__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowerCamelCase__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowerCamelCase__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowerCamelCase__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowerCamelCase__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowerCamelCase__ = 
nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowerCamelCase__ = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." lowerCamelCase__ = nn.Parameter(old_model.embed_positions.weight[:512, :] ) lowerCamelCase__ = True break if attribute.isdigit(): lowerCamelCase__ = model[int(SCREAMING_SNAKE_CASE__ )] lowerCamelCase__ = old_model[int(SCREAMING_SNAKE_CASE__ )] else: lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) if old_attribute == "": lowerCamelCase__ = old_model else: if not hasattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ): raise ValueError(F'{old_model} does not have {old_attribute}' ) lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) if not is_key_init: raise ValueError(F'{key} was not correctly initialized!' ) print(F'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _a = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
209
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated _A = collections.namedtuple('_Datasets', ['train', 'validation', 'test']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ _A = 'https://storage.googleapis.com/cvdf-datasets/mnist/' def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0] @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream: __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) if magic != 20_51: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =bytestream.read(rows * cols * num_images ) __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta ) __UpperCamelCase =data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) return data @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.one_hot on tensors.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): __UpperCamelCase =labels_dense.shape[0] __UpperCamelCase =numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes __UpperCamelCase =numpy.zeros((num_labels, num_classes) ) __UpperCamelCase =1 return labels_one_hot @deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' 
) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : str=10 ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream: __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) if magic != 20_49: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) __UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =bytestream.read(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return labels class UpperCAmelCase__ : """simple docstring""" @deprecated( A_ , 'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' , ) def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ) -> Optional[int]: __UpperCamelCase , __UpperCamelCase =random_seed.get_seed(A_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __UpperCamelCase =dtypes.as_dtype(A_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: __UpperCamelCase =10000 __UpperCamelCase =one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'images.shape: {images.shape} labels.shape: {labels.shape}' __UpperCamelCase =images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __UpperCamelCase =images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
__UpperCamelCase =images.astype(numpy.floataa ) __UpperCamelCase =numpy.multiply(A_ , 1.0 / 255.0 ) __UpperCamelCase =images __UpperCamelCase =labels __UpperCamelCase =0 __UpperCamelCase =0 @property def _a ( self ) -> Tuple: return self._images @property def _a ( self ) -> Union[str, Any]: return self._labels @property def _a ( self ) -> Optional[Any]: return self._num_examples @property def _a ( self ) -> List[str]: return self._epochs_completed def _a ( self , A_ , A_=False , A_=True ) -> Optional[Any]: if fake_data: __UpperCamelCase =[1] * 784 __UpperCamelCase =[1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(A_ )], [fake_label for _ in range(A_ )], ) __UpperCamelCase =self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __UpperCamelCase =numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) __UpperCamelCase =self.images[perma] __UpperCamelCase =self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __UpperCamelCase =self._num_examples - start __UpperCamelCase =self._images[start : self._num_examples] __UpperCamelCase =self._labels[start : self._num_examples] # Shuffle the data if shuffle: __UpperCamelCase =numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) __UpperCamelCase =self.images[perm] __UpperCamelCase =self.labels[perm] # Start next epoch __UpperCamelCase =0 __UpperCamelCase =batch_size - rest_num_examples __UpperCamelCase =self._index_in_epoch __UpperCamelCase =self._images[start:end] __UpperCamelCase =self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __UpperCamelCase =self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(SCREAMING_SNAKE_CASE__ , 'Please 
write your own downloading logic.' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): if not gfile.Exists(SCREAMING_SNAKE_CASE__ ): gfile.MakeDirs(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not gfile.Exists(SCREAMING_SNAKE_CASE__ ): urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310 with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f: __UpperCamelCase =f.size() print('Successfully downloaded' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'bytes.' ) return filepath @deprecated( SCREAMING_SNAKE_CASE__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=50_00 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =fake() __UpperCamelCase =fake() __UpperCamelCase =fake() return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ ) if not source_url: # empty string check __UpperCamelCase =DEFAULT_SOURCE_URL __UpperCamelCase ='train-images-idx3-ubyte.gz' __UpperCamelCase ='train-labels-idx1-ubyte.gz' __UpperCamelCase ='t10k-images-idx3-ubyte.gz' __UpperCamelCase ='t10k-labels-idx1-ubyte.gz' __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ 
) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_maybe_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f: __UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ ) if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =( 'Validation size should be between 0 and ' F'{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =train_images[:validation_size] __UpperCamelCase =train_labels[:validation_size] __UpperCamelCase =train_images[validation_size:] __UpperCamelCase =train_labels[validation_size:] __UpperCamelCase ={'dtype': dtype, 'reshape': reshape, 'seed': seed} __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
62
0
from __future__ import annotations def _a ( lowerCamelCase: list[int] , lowerCamelCase: int ) -> List[Any]: '''simple docstring''' __A = 0 __A = len(SCREAMING_SNAKE_CASE__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: __A = i + 1 else: __A = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'{two_pointer([2, 7, 11, 15], 9) = }')
117
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase__ ( A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Tuple = TransfoXLTokenizer UpperCAmelCase__ : str = False UpperCAmelCase__ : Tuple = False def _a ( self ) -> Union[str, Any]: super().setUp() __UpperCamelCase =[ '<unk>', '[CLS]', '[SEP]', 'want', 'unwanted', 'wa', 'un', 'running', ',', 'low', 'l', ] __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def _a ( self , **A_ ) -> Optional[int]: __UpperCamelCase =True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase ='<unk> UNwanted , running' __UpperCamelCase ='<unk> unwanted, running' return input_text, output_text def _a ( self ) -> str: __UpperCamelCase =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A_ ) __UpperCamelCase =tokenizer.tokenize('<unk> UNwanted , running' ) self.assertListEqual(A_ , ['<unk>', 'unwanted', ',', 'running'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [0, 4, 8, 7] ) def _a ( self ) -> Any: __UpperCamelCase =TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) def _a ( self ) -> Optional[int]: __UpperCamelCase =TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _a ( self ) -> int: __UpperCamelCase =TransfoXLTokenizer(lower_case=A_ ) __UpperCamelCase ='Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?' 
__UpperCamelCase =[ 'Hello', '(', 'bracket', ')', 'and', 'side', '@-@', 'scrolled', '[', 'and', ']', 'Henry', '\'s', '$', '5', '@,@', '000', 'with', '3', '@.@', '34', 'm', '.', 'What', '\'s', 'up', '!', '?', ] self.assertListEqual(tokenizer.tokenize(A_ ) , A_ ) self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) , A_ ) def _a ( self ) -> Optional[int]: __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =len(A_ ) tokenizer.add_tokens(['new1', 'new2'] ) tokenizer.move_added_token('new1' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(A_ ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('new1' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , 'new1' )
62
0
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Dict = AlbertConfig.from_json_file(SCREAMING_SNAKE_CASE__ ) print(f"""Building PyTorch model from configuration: {config}""" ) __UpperCamelCase :str = AlbertForPreTraining(SCREAMING_SNAKE_CASE__ ) # Load weights from tf checkpoint load_tf_weights_in_albert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--albert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained ALBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __lowercase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
43
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _A = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, 
ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
def __lowerCamelCase ( lowerCamelCase__ = 100 ): """simple docstring""" lowercase__ : List[Any] = n * (n + 1) * (2 * n + 1) / 6 lowercase__ : List[Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'''{solution() = }''')
130
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) _A = logging.getLogger(__name__) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): __UpperCamelCase =np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 ) return np.sum(outputs == labels ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ): with open(SCREAMING_SNAKE_CASE__ , encoding='utf_8' ) as f: __UpperCamelCase =csv.reader(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[] next(SCREAMING_SNAKE_CASE__ ) # skip the first line for line in tqdm(SCREAMING_SNAKE_CASE__ ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ): __UpperCamelCase =[] for dataset in encoded_datasets: __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) __UpperCamelCase =np.zeros((n_batch, 2) , dtype=np.intaa ) __UpperCamelCase =np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa ) __UpperCamelCase =np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] 
__UpperCamelCase =with_conta __UpperCamelCase =with_conta __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1 __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1 __UpperCamelCase =with_conta __UpperCamelCase =with_conta __UpperCamelCase =mc_label __UpperCamelCase =(input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE__ ) for t in all_inputs ) ) return tensor_datasets def _UpperCAmelCase ( ): __UpperCamelCase =argparse.ArgumentParser() parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' ) parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' ) parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 ) parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 ) parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 ) parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 ) parser.add_argument( '--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' 
) , ) parser.add_argument( '--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 ) parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' ) parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' ) parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 ) parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 ) parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=3_74 ) parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' ) __UpperCamelCase =parser.parse_args() print(SCREAMING_SNAKE_CASE__ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) __UpperCamelCase =torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' 
) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __UpperCamelCase =['_start_', '_delimiter_', '_classify_'] __UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) ) model.to(SCREAMING_SNAKE_CASE__ ) # Load and encode the datasets def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : str ): if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return obj return [tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) for o in obj] logger.info('Encoding dataset...' 
) __UpperCamelCase =load_rocstories_dataset(args.train_dataset ) __UpperCamelCase =load_rocstories_dataset(args.eval_dataset ) __UpperCamelCase =(train_dataset, eval_dataset) __UpperCamelCase =tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) # Compute the max input length for the Transformer __UpperCamelCase =model.config.n_positions // 2 - 2 __UpperCamelCase =max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __UpperCamelCase =pre_process_datasets(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ) __UpperCamelCase , __UpperCamelCase =tensor_datasets[0], tensor_datasets[1] __UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.train_batch_size ) __UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =SequentialSampler(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __UpperCamelCase =args.max_steps __UpperCamelCase =args.max_steps // (len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps) + 1 else: __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps * args.num_train_epochs __UpperCamelCase =list(model.named_parameters() ) __UpperCamelCase =['bias', 'LayerNorm.bias', 'LayerNorm.weight'] __UpperCamelCase =[ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if 
any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] __UpperCamelCase =AdamW(SCREAMING_SNAKE_CASE__ , lr=args.learning_rate , eps=args.adam_epsilon ) __UpperCamelCase =get_linear_schedule_with_warmup( SCREAMING_SNAKE_CASE__ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE__ ) if args.do_train: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ): __UpperCamelCase =0 __UpperCamelCase =0 __UpperCamelCase =tqdm(SCREAMING_SNAKE_CASE__ , desc='Training' ) for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch __UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __UpperCamelCase =( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __UpperCamelCase ='Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE__ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __UpperCamelCase =model.module if hasattr(SCREAMING_SNAKE_CASE__ , 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ ) torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE__ ) model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE__ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned 
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(SCREAMING_SNAKE_CASE__ ) if args.do_eval: model.eval() __UpperCamelCase , __UpperCamelCase =0, 0 __UpperCamelCase , __UpperCamelCase =0, 0 for batch in tqdm(SCREAMING_SNAKE_CASE__ , desc='Evaluating' ): __UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch with torch.no_grad(): __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =model( SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =mc_logits.detach().cpu().numpy() __UpperCamelCase =mc_labels.to('cpu' ).numpy() __UpperCamelCase =accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __UpperCamelCase =eval_loss / nb_eval_steps __UpperCamelCase =eval_accuracy / nb_eval_examples __UpperCamelCase =tr_loss / nb_tr_steps if args.do_train else None __UpperCamelCase ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} __UpperCamelCase =os.path.join(args.output_dir , 'eval_results.txt' ) with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
62
0
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) a__ : int ='''\\n Text data.\n Second line of data.''' a__ : Optional[Any] ='''file''' @pytest.fixture(scope='session' ) def lowercase__ ( __lowercase : Optional[int] ) -> str: """simple docstring""" __UpperCamelCase = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd') __UpperCamelCase = bytes(SCREAMING_SNAKE_CASE__ , 'utf-8' ) with zstd.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) return path @pytest.fixture def lowercase__ ( __lowercase : int ) -> List[Any]: """simple docstring""" with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE__ ) , 'w' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) return FILE_PATH @pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] ) def lowercase__ ( __lowercase : Dict , __lowercase : Dict , __lowercase : List[str] , __lowercase : Dict , __lowercase : Dict , __lowercase : Any ) -> Optional[int]: """simple docstring""" __UpperCamelCase = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path} __UpperCamelCase = input_paths[compression_format] __UpperCamelCase = tmp_path / 'cache' __UpperCamelCase = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE__ , extract_compressed_file=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase = cached_path(SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ ) as f: __UpperCamelCase = f.read() with open(SCREAMING_SNAKE_CASE__ ) as f: __UpperCamelCase = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('default_extracted' , [True, False] ) @pytest.mark.parametrize('default_cache_dir' , [True, False] ) def lowercase__ ( __lowercase : 
List[str] , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : Any ) -> Dict: """simple docstring""" __UpperCamelCase = 'custom_cache' __UpperCamelCase = 'custom_extracted_dir' __UpperCamelCase = tmp_path / 'custom_extracted_path' if default_extracted: __UpperCamelCase = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted') else: monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , SCREAMING_SNAKE_CASE__ ) monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(SCREAMING_SNAKE_CASE__ ) ) __UpperCamelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __UpperCamelCase = xz_file __UpperCamelCase = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE__ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE__ ) ) __UpperCamelCase = cached_path(SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ ) assert Path(SCREAMING_SNAKE_CASE__ ).parent.parts[-2:] == expected def lowercase__ ( __lowercase : Optional[Any] ) -> Dict: """simple docstring""" __UpperCamelCase = str(Path(SCREAMING_SNAKE_CASE__ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE__ ) == text_file # relative path __UpperCamelCase = str(Path(SCREAMING_SNAKE_CASE__ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(SCREAMING_SNAKE_CASE__ ) == text_file def lowercase__ ( __lowercase : List[Any] ) -> Any: """simple docstring""" __UpperCamelCase = str(tmp_path.resolve() / '__missing_file__.txt' ) with pytest.raises(SCREAMING_SNAKE_CASE__ ): cached_path(SCREAMING_SNAKE_CASE__ ) # relative path __UpperCamelCase = './__missing_file__.txt' with pytest.raises(SCREAMING_SNAKE_CASE__ ): cached_path(SCREAMING_SNAKE_CASE__ ) def lowercase__ ( __lowercase : str ) -> Tuple: """simple docstring""" __UpperCamelCase = get_from_cache(F'''tmp://{tmpfs_file}''' ) with 
open(SCREAMING_SNAKE_CASE__ ) as f: __UpperCamelCase = f.read() assert output_file_content == FILE_CONTENT @patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE__ ) def lowercase__ ( ) -> Optional[Any]: """simple docstring""" with pytest.raises(SCREAMING_SNAKE_CASE__ ): cached_path('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE__ ) def lowercase__ ( __lowercase : List[str] ) -> List[Any]: """simple docstring""" __UpperCamelCase = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(SCREAMING_SNAKE_CASE__ ): http_get('https://huggingface.co' , temp_file=SCREAMING_SNAKE_CASE__ ) with pytest.raises(SCREAMING_SNAKE_CASE__ ): http_head('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE__ ) def lowercase__ ( __lowercase : str ) -> Tuple: """simple docstring""" __UpperCamelCase = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(SCREAMING_SNAKE_CASE__ ): ftp_get('ftp://huggingface.co' , temp_file=SCREAMING_SNAKE_CASE__ ) with pytest.raises(SCREAMING_SNAKE_CASE__ ): ftp_head('ftp://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE__ ) def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" __UpperCamelCase = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(SCREAMING_SNAKE_CASE__ ): fsspec_get('s3://huggingface.co' , temp_file=SCREAMING_SNAKE_CASE__ ) with pytest.raises(SCREAMING_SNAKE_CASE__ ): fsspec_head('s3://huggingface.co' )
53
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ): __UpperCamelCase =1 __UpperCamelCase =0 __UpperCamelCase =1 __UpperCamelCase =1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"""{solution() = }""")
62
0
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __snake_case : def __init__( self : List[Any] , _lowercase : List[str] , _lowercase : str=13 , _lowercase : Tuple=7 , _lowercase : List[str]=True , _lowercase : Dict=True , _lowercase : Union[str, Any]=False , _lowercase : List[Any]=True , _lowercase : str=99 , _lowercase : List[str]=32 , _lowercase : Optional[int]=5 , _lowercase : Dict=4 , _lowercase : Optional[Any]=37 , _lowercase : Tuple="gelu" , _lowercase : Optional[Any]=0.1 , _lowercase : str=0.1 , _lowercase : Union[str, Any]=5_12 , _lowercase : List[Any]=16 , _lowercase : Dict=2 , _lowercase : Union[str, Any]=0.02 , _lowercase : Any=3 , _lowercase : Tuple=4 , _lowercase : Dict=None , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_mask SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = 
type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = scope def __a ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : List[Any] ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , use_stable_embedding=A_ , ) def __a ( self : int , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Any , _lowercase : int ): """simple docstring""" SCREAMING_SNAKE_CASE__ = OpenLlamaModel(config=A_ ) model.to(A_ ) model.eval() 
SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ ) SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : str , _lowercase : Any , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = OpenLlamaModel(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Tuple , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : int , _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : Any , _lowercase : Union[str, Any] , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Tuple , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : int , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = OpenLlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , 
encoder_attention_mask=A_ , use_cache=A_ , ) SCREAMING_SNAKE_CASE__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )["""hidden_states"""][0] SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )["""hidden_states"""][0] # select random slice SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) ) def __a ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __snake_case ( A_ , A_ , A_ , unittest.TestCase ): lowerCAmelCase_ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) lowerCAmelCase_ = (OpenLlamaForCausalLM,) if is_torch_available() 
else () lowerCAmelCase_ = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ = OpenLlamaModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __a ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __a ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE__ = type self.model_tester.create_and_check_model(*A_ ) def __a ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = input_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = """single_label_classification""" SCREAMING_SNAKE_CASE__ = input_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ = 
input_ids.ne(1 ).to(A_ ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = """multi_label_classification""" SCREAMING_SNAKE_CASE__ = input_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ ) SCREAMING_SNAKE_CASE__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE__ = OpenLlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def __a ( self : Union[str, Any] ): """simple docstring""" pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __a ( self : str , _lowercase : str ): """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = ids_tensor([1, 10] , config.vocab_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE__ = OpenLlamaModel(A_ ) original_model.to(A_ ) original_model.eval() SCREAMING_SNAKE_CASE__ = original_model(A_ ).last_hidden_state SCREAMING_SNAKE_CASE__ = original_model(A_ 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE__ = {"""type""": scaling_type, """factor""": 10.0} SCREAMING_SNAKE_CASE__ = OpenLlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() SCREAMING_SNAKE_CASE__ = scaled_model(A_ ).last_hidden_state SCREAMING_SNAKE_CASE__ = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ , A_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) )
219
# Lazy-import module surface for ViTMAE: heavy backends (torch / TF) are only
# imported when their symbols are first accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# BUG FIX: this mapping was previously bound to a throwaway name and then
# clobbered by plain list reassignments, so the `_import_structure` consumed
# by `_LazyModule` below was undefined (NameError at import time).
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch objects are only registered when torch is installed.
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow objects are only registered when TF is installed.
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # BUG FIX: the lazy module must replace this module in sys.modules;
    # previously the result was assigned to a throwaway name instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union lowercase__ = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$") @total_ordering @dataclass class A_ : '''simple docstring''' UpperCAmelCase_ : str UpperCAmelCase_ : Optional[str] = None UpperCAmelCase_ : Optional[Union[str, int]] = None UpperCAmelCase_ : Optional[Union[str, int]] = None UpperCAmelCase_ : Optional[Union[str, int]] = None def UpperCAmelCase_ ( self : Any ) -> Dict: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = _str_to_version_tuple(self.version_str ) def __repr__( self : Any ) -> List[Any]: return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}""" @property def UpperCAmelCase_ ( self : Any ) -> List[str]: return self.major, self.minor, self.patch def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Union[str, Any] ) -> Optional[Any]: if isinstance(A_ , A_ ): return Version(A_ ) elif isinstance(A_ , A_ ): return other raise TypeError(f"""{other} (type {type(A_ )}) cannot be compared to version.""" ) def __eq__( self : int , lowercase_ : Optional[int] ) -> Union[str, Any]: try: UpperCAmelCase : str = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : Dict , lowercase_ : Tuple ) -> Optional[int]: UpperCAmelCase : List[Any] = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : List[Any] ) -> Optional[Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Any ) -> Tuple: UpperCAmelCase : Dict = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def UpperCAmelCase_ ( self : List[Any] ) -> str: return self.version_str def UpperCamelCase( UpperCAmelCase_ ): UpperCAmelCase : str = 
_VERSION_REG.match(SCREAMING_SNAKE_CASE__ ) if not res: raise ValueError(F"""Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.""" ) return tuple(int(SCREAMING_SNAKE_CASE__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def UpperCamelCase( UpperCAmelCase_ ): return ".".join(str(SCREAMING_SNAKE_CASE__ ) for v in version_tuple )
151
# Lazy-import module surface for Jukebox: torch-backed modeling objects are
# only imported when first accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# BUG FIX: this mapping was previously bound to a throwaway name and then
# clobbered by a list reassignment, so the `_import_structure` consumed by
# `_LazyModule` below was undefined (NameError at import time).
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch; register them only when it is present.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # BUG FIX: the lazy module must replace this module in sys.modules;
    # previously the result was assigned to a throwaway name instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
"""Low-level terminal helpers for an interactive menu: colored output and
ANSI cursor movement."""
import enum
import shutil
import sys


# Current terminal width; the second element (height) is unused.
# BUG FIX: both tuple elements were previously unpacked into one throwaway
# name, leaving TERMINAL_WIDTH (used below) undefined.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# ANSI cursor-movement suffixes, keyed by direction name.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    # BUG FIX: both members previously shared one attribute name, which makes
    # enum creation raise (duplicate keys are rejected by Enum).
    # NOTE(review): member names UP/DOWN inferred from CURSOR_TO_CHAR and the
    # 0/1 values — confirm against upstream.
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write *content* (+ optional *end*) to stdout and flush immediately."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write *content* wrapped in the ANSI color escape for *color*."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines, direction):
    """Move the cursor *num_lines* in *direction* ('up'/'down'/'left'/'right')."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current terminal line and rewind the cursor."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a full-width horizontal rule on the current line."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
229
# Lazy-import module surface for the Wav2Vec2 phoneme tokenizer.
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# BUG FIX: this mapping was previously bound to a throwaway name, leaving the
# `_import_structure` consumed by `_LazyModule` below undefined (NameError).
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    # BUG FIX: the submodule/class names had been mangled
    # (tokenization_wavaveca_phoneme / WavaVecaPhonemeCTCTokenizer) and did
    # not match the structure declared above.
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with the lazy proxy so the tokenizer module is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' import tensorflow as tf from ...tf_utils import shape_list class A_ ( tf.keras.layers.Layer ): def __init__( self : Dict , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : List[str] , snake_case_ : Optional[int]=1 , snake_case_ : int=False , **snake_case_ : Optional[Any] ): super().__init__(**A_ ) _UpperCAmelCase = vocab_size _UpperCAmelCase = d_embed _UpperCAmelCase = d_proj _UpperCAmelCase = cutoffs + [vocab_size] _UpperCAmelCase = [0] + self.cutoffs _UpperCAmelCase = div_val _UpperCAmelCase = self.cutoffs[0] _UpperCAmelCase = len(self.cutoffs ) - 1 _UpperCAmelCase = self.shortlist_size + self.n_clusters _UpperCAmelCase = keep_order _UpperCAmelCase = [] _UpperCAmelCase = [] def lowercase ( self : str , snake_case_ : Tuple ): if self.n_clusters > 0: _UpperCAmelCase = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=A_ , name="cluster_weight" ) _UpperCAmelCase = self.add_weight( shape=(self.n_clusters,) , initializer="zeros" , trainable=A_ , name="cluster_bias" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: _UpperCAmelCase = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=A_ , name=f'out_projs_._{i}' , ) self.out_projs.append(A_ ) else: self.out_projs.append(A_ ) _UpperCAmelCase = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=A_ , name=f'out_layers_._{i}_._weight' , ) _UpperCAmelCase = self.add_weight( shape=(self.vocab_size,) , initializer="zeros" , trainable=A_ , name=f'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): _UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] _UpperCAmelCase = self.d_embed // (self.div_val**i) _UpperCAmelCase = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=A_ , name=f'out_projs_._{i}' ) 
self.out_projs.append(A_ ) _UpperCAmelCase = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=A_ , name=f'out_layers_._{i}_._weight' , ) _UpperCAmelCase = self.add_weight( shape=(r_idx - l_idx,) , initializer="zeros" , trainable=A_ , name=f'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) super().build(A_ ) @staticmethod def lowercase ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Dict=None ): _UpperCAmelCase = x if proj is not None: _UpperCAmelCase = tf.einsum("ibd,ed->ibe" , A_ , A_ ) return tf.einsum("ibd,nd->ibn" , A_ , A_ ) + b @staticmethod def lowercase ( snake_case_ : Union[str, Any] , snake_case_ : List[str] ): _UpperCAmelCase = shape_list(A_ ) _UpperCAmelCase = tf.range(lp_size[0] , dtype=target.dtype ) _UpperCAmelCase = tf.stack([r, target] , 1 ) return tf.gather_nd(A_ , A_ ) def lowercase ( self : Dict , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : Dict=True , snake_case_ : Union[str, Any]=False ): _UpperCAmelCase = 0 if self.n_clusters == 0: _UpperCAmelCase = self._logit(A_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: _UpperCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=A_ , logits=A_ ) _UpperCAmelCase = tf.nn.log_softmax(A_ , axis=-1 ) else: _UpperCAmelCase = shape_list(A_ ) _UpperCAmelCase = [] _UpperCAmelCase = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): _UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: _UpperCAmelCase = (target >= l_idx) & (target < r_idx) _UpperCAmelCase = tf.where(A_ ) _UpperCAmelCase = tf.boolean_mask(A_ , A_ ) - l_idx if self.div_val == 1: _UpperCAmelCase = self.out_layers[0][0][l_idx:r_idx] _UpperCAmelCase = self.out_layers[0][1][l_idx:r_idx] else: _UpperCAmelCase = self.out_layers[i][0] _UpperCAmelCase = self.out_layers[i][1] if i == 0: _UpperCAmelCase = 
tf.concat([cur_W, self.cluster_weight] , 0 ) _UpperCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 ) _UpperCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[0] ) _UpperCAmelCase = tf.nn.log_softmax(A_ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: _UpperCAmelCase = tf.boolean_mask(A_ , A_ ) _UpperCAmelCase = self._gather_logprob(A_ , A_ ) else: _UpperCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[i] ) _UpperCAmelCase = tf.nn.log_softmax(A_ ) _UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster _UpperCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(A_ ) if target is not None: _UpperCAmelCase = tf.boolean_mask(A_ , A_ ) _UpperCAmelCase = tf.boolean_mask(A_ , A_ ) _UpperCAmelCase = self._gather_logprob(A_ , A_ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(A_ , -cur_logprob , shape_list(A_ ) ) _UpperCAmelCase = tf.concat(A_ , axis=-1 ) if target is not None: if return_mean: _UpperCAmelCase = tf.reduce_mean(A_ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(A_ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(A_ , name=self.name , aggregation="mean" if return_mean else "" ) return out
22
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class UpperCAmelCase__ ( A_ ): """simple docstring""" def _a ( self , A_ ) -> float: return 0.0 def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) __UpperCamelCase =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =5_12 __UpperCamelCase =[1] + [0] * (size - 1) __UpperCamelCase =[filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs] __UpperCamelCase =[0] * (samplerate - size) # zero-padding outputs += filler __UpperCamelCase =np.abs(np.fft.fft(SCREAMING_SNAKE_CASE__ ) ) __UpperCamelCase =20 * np.logaa(SCREAMING_SNAKE_CASE__ ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('Frequency (Hz)' ) plt.xscale('log' ) # Display within reasonable bounds __UpperCamelCase =get_bounds(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('Gain (dB)' ) plt.plot(SCREAMING_SNAKE_CASE__ ) plt.show() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =5_12 __UpperCamelCase =[1] + [0] * (size - 1) __UpperCamelCase =[filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs] __UpperCamelCase =[0] * (samplerate - size) # zero-padding outputs += filler __UpperCamelCase =np.angle(np.fft.fft(SCREAMING_SNAKE_CASE__ ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('Frequency (Hz)' ) plt.xscale('log' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('Phase shift (Radians)' ) plt.plot(np.unwrap(SCREAMING_SNAKE_CASE__ , -2 * pi ) ) plt.show()
62
0
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCamelCase__( A_): UpperCAmelCase__ : Optional[int] = "char" UpperCAmelCase__ : List[str] = "bpe" UpperCAmelCase__ : Tuple = "wp" UpperCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCamelCase__( A_): UpperCAmelCase__ : Optional[int] = ["image_processor", "char_tokenizer"] UpperCAmelCase__ : str = "ViTImageProcessor" UpperCAmelCase__ : Optional[Any] = "MgpstrTokenizer" def __init__( self: int , UpperCamelCase_: str=None , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Dict ): __lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , A_ , ) __lowerCamelCase = kwargs.pop("""feature_extractor""" ) __lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) __lowerCamelCase = tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained("""gpt2""" ) __lowerCamelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(A_ , A_ ) def __call__( self: List[str] , UpperCamelCase_: Any=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Optional[int] ): if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: __lowerCamelCase = self.image_processor(A_ , return_tensors=A_ , **A_ ) if text is not None: __lowerCamelCase = self.char_tokenizer(A_ , return_tensors=A_ , **A_ ) if text is None: return 
inputs elif images is None: return encodings else: __lowerCamelCase = encodings["""input_ids"""] return inputs def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Dict ): __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = sequences __lowerCamelCase = char_preds.size(0 ) __lowerCamelCase, __lowerCamelCase = self._decode_helper(A_ , """char""" ) __lowerCamelCase, __lowerCamelCase = self._decode_helper(A_ , """bpe""" ) __lowerCamelCase, __lowerCamelCase = self._decode_helper(A_ , """wp""" ) __lowerCamelCase = [] __lowerCamelCase = [] for i in range(A_ ): __lowerCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]] __lowerCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]] __lowerCamelCase = scores.index(max(A_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __lowerCamelCase = {} __lowerCamelCase = final_strs __lowerCamelCase = final_scores __lowerCamelCase = char_strs __lowerCamelCase = bpe_strs __lowerCamelCase = wp_strs return out def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Any ): if format == DecodeType.CHARACTER: __lowerCamelCase = self.char_decode __lowerCamelCase = 1 __lowerCamelCase = """[s]""" elif format == DecodeType.BPE: __lowerCamelCase = self.bpe_decode __lowerCamelCase = 2 __lowerCamelCase = """#""" elif format == DecodeType.WORDPIECE: __lowerCamelCase = self.wp_decode __lowerCamelCase = 1_02 __lowerCamelCase = """[SEP]""" else: raise ValueError(F'Format {format} is not supported.' 
) __lowerCamelCase, __lowerCamelCase = [], [] __lowerCamelCase = pred_logits.size(0 ) __lowerCamelCase = pred_logits.size(1 ) __lowerCamelCase, __lowerCamelCase = pred_logits.topk(1 , dim=-1 , largest=A_ , sorted=A_ ) __lowerCamelCase = preds_index.view(-1 , A_ )[:, 1:] __lowerCamelCase = decoder(A_ ) __lowerCamelCase, __lowerCamelCase = torch.nn.functional.softmax(A_ , dim=2 ).max(dim=2 ) __lowerCamelCase = preds_max_prob[:, 1:] for index in range(A_ ): __lowerCamelCase = preds_str[index].find(A_ ) __lowerCamelCase = preds_str[index][:pred_eos] __lowerCamelCase = preds_index[index].cpu().tolist() __lowerCamelCase = pred_index.index(A_ ) if eos_token in pred_index else -1 __lowerCamelCase = preds_max_prob[index][: pred_eos_index + 1] __lowerCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A_ ) conf_scores.append(A_ ) return dec_strs, conf_scores def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int ): __lowerCamelCase = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(A_ )] return decode_strs def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] ): return self.bpe_tokenizer.batch_decode(A_ ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ): __lowerCamelCase = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(A_ )] return decode_strs
12
# Lazy-import scaffolding for the SEW model (standard `__init__.py` layout).
# NOTE(review): both the import-structure dict and the torch-only model list
# are assigned to `_A`, yet `_LazyModule` below is passed `_import_structure`,
# which is never defined in this fragment — identifiers appear to have been
# mangled; confirm against the original `models/sew/__init__.py`.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names it exports (available without torch).
_A = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}

try:
    # Modeling classes are only exported when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SEWForCTC',
        'SEWForSequenceClassification',
        'SEWModel',
        'SEWPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports each
    # submodule on first attribute access.
    _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __lowerCAmelCase ( unittest.TestCase): def _lowercase ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() def _lowercase ( self ) -> List[Any]: '''simple docstring''' a__ , a__ : int =FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny" , from_pt=A_ , dtype=jnp.bfloataa ) a__ , a__ : Any =FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , controlnet=A_ , from_pt=A_ , dtype=jnp.bfloataa ) a__ : List[str] =controlnet_params a__ : Any ="bird" a__ : Any =jax.device_count() a__ : int =pipe.prepare_text_inputs([prompts] * num_samples ) a__ : List[str] =load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) a__ : int =pipe.prepare_image_inputs([canny_image] * num_samples ) a__ : str =jax.random.PRNGKey(0 ) a__ : Optional[int] =jax.random.split(A_ , jax.device_count() ) a__ : Union[str, Any] =replicate(A_ ) a__ : Optional[int] =shard(A_ ) a__ : Union[str, Any] =shard(A_ ) a__ : List[Any] =pipe( prompt_ids=A_ , image=A_ , params=A_ , prng_seed=A_ , num_inference_steps=5_0 , jit=A_ , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) a__ : int =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a__ : List[Any] =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] a__ : Tuple =jnp.asarray(jax.device_get(image_slice.flatten() ) ) a__ : List[Any] =jnp.array( [0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] ) print(F'''output_slice: 
{output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' a__ , a__ : Tuple =FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose" , from_pt=A_ , dtype=jnp.bfloataa ) a__ , a__ : Tuple =FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , controlnet=A_ , from_pt=A_ , dtype=jnp.bfloataa ) a__ : Optional[int] =controlnet_params a__ : Any ="Chef in the kitchen" a__ : List[Any] =jax.device_count() a__ : Tuple =pipe.prepare_text_inputs([prompts] * num_samples ) a__ : Dict =load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) a__ : Optional[Any] =pipe.prepare_image_inputs([pose_image] * num_samples ) a__ : Optional[int] =jax.random.PRNGKey(0 ) a__ : Union[str, Any] =jax.random.split(A_ , jax.device_count() ) a__ : Optional[int] =replicate(A_ ) a__ : Dict =shard(A_ ) a__ : Dict =shard(A_ ) a__ : Optional[Any] =pipe( prompt_ids=A_ , image=A_ , params=A_ , prng_seed=A_ , num_inference_steps=5_0 , jit=A_ , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) a__ : int =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a__ : Union[str, Any] =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] a__ : Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) ) a__ : Optional[Any] =jnp.array( [[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
95
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical config locations for the published MVP checkpoints.
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}


class MvpConfig(PretrainedConfig):
    """Configuration class for the MVP encoder-decoder model.

    Restored from an identifier-mangled fragment: every ``__init__``
    parameter was named ``A_`` (duplicate parameter names are a
    SyntaxError) and all class attributes shared one name. The real
    parameter names were recovered from the attribute assignments in the
    body; defaults are kept exactly as in the fragment.
    """

    model_type = "mvp"
    # Cached decoder state is not part of the model output signature.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names -> MVP-specific config fields.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy configs used `force_bos_token_to_be_generated`; translate it
        # into the modern `forced_bos_token_id` and warn the user once.
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
62
0
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wrap several `ControlNetModel`s so they act as one: each net receives
    its own conditioning image and scale, and their residuals are summed.

    Restored from an identifier-mangled fragment: the original `forward` had
    eleven parameters all named ``__lowerCAmelCase`` (a SyntaxError) and the
    module logger was bound to ``_a`` while being used as ``logger`` — real
    names were recovered from the `controlnet` call sites and the diffusers
    `MultiControlNetModel` API.
    """

    def __init__(self, controlnets):
        super().__init__()
        # ModuleList so the sub-nets are registered and move with .to()/.cuda().
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        """Run every controlnet and sum their down/mid residuals.

        `controlnet_cond` and `conditioning_scale` are parallel sequences,
        one entry per sub-net.
        """
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples: first net initializes, the rest accumulate.
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        """Save each sub-net under `save_directory`, `save_directory_1`, …"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + F'_{idx}'

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        """Load sub-nets from `pretrained_model_path`, `…_1`, `…_2`, … until a
        directory is missing, then wrap them in a MultiControlNetModel."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + F'_{idx}'

        logger.info(F'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')

        if len(controlnets) == 0:
            raise ValueError(
                F'No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.'
            )

        return cls(controlnets)
209
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = GPTaTokenizer UpperCAmelCase__ : Any = GPTaTokenizerFast UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : int = {"add_prefix_space": True} UpperCAmelCase__ : Any = False def _a ( self ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase =[ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) ) __UpperCamelCase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase ={'unk_token': '<unk>'} __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _a ( self , **A_ ) -> str: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , **A_ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , A_ ) -> Tuple: __UpperCamelCase ='lower newer' __UpperCamelCase ='lower newer' return input_text, output_text def _a ( self ) -> List[Any]: __UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) 
__UpperCamelCase ='lower newer' __UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) self.assertListEqual(A_ , A_ ) __UpperCamelCase =tokens + [tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self ) -> int: if not self.test_rust_tokenizer: return __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase ='lower newer' # Testing tokenization __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids without special tokens __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids with special tokens __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ ) __UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ ) __UpperCamelCase =rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # Testing the unknown token __UpperCamelCase =tokens + [rust_tokenizer.unk_token] __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def _a ( self , *A_ , **A_ ) -> Optional[int]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def _a ( self , A_=15 ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This 
is a simple input 2'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , ) def _a ( self ) -> int: __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input'] __UpperCamelCase =('This is a simple input', 'This is a pair') __UpperCamelCase =[ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __UpperCamelCase =tokenizer.pad_token_id __UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) __UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' ) __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding 
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase ='$$$' __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ ) __UpperCamelCase ='This is a simple input' __UpperCamelCase =['This is a simple input 1', 'This is a simple input 2'] __UpperCamelCase =tokenizer.bos_token_id __UpperCamelCase =tokenizer(A_ ) __UpperCamelCase =tokenizer(A_ ) self.assertEqual(out_s.input_ids[0] , A_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __UpperCamelCase =tokenizer.decode(out_s.input_ids ) __UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def _a ( self ) -> Optional[int]: pass def _a ( self ) -> Any: # TODO: change to self.get_tokenizers() when the fast version is implemented __UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): __UpperCamelCase ='Encode this.' 
__UpperCamelCase ='This one too please.' __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ ) encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ ) __UpperCamelCase =tokenizer.encode_plus( A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , ) __UpperCamelCase =encoded_sequence_dict['input_ids'] __UpperCamelCase =encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(A_ ) , len(A_ ) ) __UpperCamelCase =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ ) ] __UpperCamelCase =[x for x in filtered_sequence if x is not None] self.assertEqual(A_ , A_ ) @require_tokenizers class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> Optional[Any]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('test_opt' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) def _a ( self ) -> Dict: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ ) __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # Same as above self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def _a ( self ) -> List[Any]: __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ ) __UpperCamelCase ='bos' __UpperCamelCase =tokenizer.get_vocab()['bos'] __UpperCamelCase ='A photo of a cat' __UpperCamelCase =tokenizer.encode( A_ , ) # We 
changed the bos token self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('./tok' ) __UpperCamelCase =AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) __UpperCamelCase =tokenizer.encode( A_ , ) self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
62
0
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class A_ ( A_ , unittest.TestCase ): lowerCAmelCase__ = TransfoXLTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowerCAmelCase (self :Optional[Any] )-> Union[str, Any]: super().setUp() __A = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] __A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _lowerCAmelCase (self :List[str] , **_UpperCamelCase :List[str] )-> Optional[int]: __A = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _lowerCAmelCase (self :str , _UpperCamelCase :Dict )-> Tuple: __A = '''<unk> UNwanted , running''' __A = '''<unk> unwanted, running''' return input_text, output_text def _lowerCAmelCase (self :str )-> str: __A = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A_ ) __A = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(A_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [0, 4, 8, 7] ) def _lowerCAmelCase (self :Tuple )-> Any: __A = TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def _lowerCAmelCase (self :Union[str, Any] )-> Optional[int]: __A = TransfoXLTokenizer(lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? 
''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowerCAmelCase (self :int )-> int: __A = TransfoXLTokenizer(lower_case=A_ ) __A = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' __A = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(A_ ) , A_ ) self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) , A_ ) def _lowerCAmelCase (self :List[str] )-> Optional[int]: __A = self.get_tokenizer() __A = len(A_ ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(A_ ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
117
"""Minimal linear-algebra library: a ``Vector`` and a ``Matrix`` class plus
constructors for zero/unit/random instances.

Rebuilt from an identifier-mangled fragment: every local was named
``__UpperCamelCase`` (so computed results were discarded and undefined names
returned), all methods were named ``_a``, and both classes shared one name.
Real identifiers were restored from the visible control flow.
"""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A real-valued vector backed by a plain Python list."""

    def __init__(self, components=None):
        # Copy so later mutation of the caller's list cannot alias us.
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        raise Exception("must have the same size")

    def __sub__(self, other):
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        raise Exception("must have the same size")

    def __mul__(self, other):
        """Scalar multiplication (returns a Vector) or dot product (a number)."""
        if isinstance(other, (float, int)):
            return Vector([c * other for c in self.__components])
        if isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            return sum(self.__components[i] * other.component(i) for i in range(size))
        raise Exception("invalid operand!")

    def copy(self):
        """Return an independent copy of this vector."""
        return Vector(self.__components)

    def component(self, i):
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        raise Exception("index out of range")

    def change_component(self, pos, value):
        """Set the component at `pos` to `value`."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        """Return the Euclidean (L2) norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        return math.sqrt(sum(c ** 2 for c in self.__components))

    def angle(self, other, deg=False):
        """Return the angle to `other`, in radians (or degrees if `deg`)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        return math.acos(num / den)


def zero_vector(dimension):
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension, pos):
    """Return the standard basis vector e_pos in the given dimension."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    components = [0] * dimension
    components[pos] = 1
    return Vector(components)


def axpy(scalar, x, y):
    """Return ``scalar * x + y`` (the BLAS "axpy" operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and isinstance(scalar, (int, float))
    )
    return x * scalar + y


def random_vector(n, a, b):
    """Return an n-dimensional vector of random integers in [a, b].

    NOTE(review): the mangled source seeded the RNG with one of its own
    arguments; seeding with `n` keeps output deterministic per size —
    confirm against the upstream module.
    """
    random.seed(n)
    return Vector([random.randint(a, b) for _ in range(n)])


class Matrix:
    """A width x height real matrix stored as a list of rows."""

    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [self.__matrix[i][j] + other.component(i, j) for j in range(self.__width)]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        raise Exception("matrix must have the same dimension!")

    def __sub__(self, other):
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [self.__matrix[i][j] - other.component(i, j) for j in range(self.__width)]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        raise Exception("matrices must have the same dimension!")

    def __mul__(self, other):
        """Matrix-vector product (returns a Vector) or scalar multiple (a Matrix)."""
        if isinstance(other, Vector):
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [self.__matrix[i][j] * other.component(j) for j in range(self.__width)]
                    ans.change_component(i, sum(prods))
                return ans
            raise Exception(
                "vector must have the same size as the "
                "number of columns of the matrix!"
            )
        elif isinstance(other, (int, float)):
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self):
        return self.__height

    def width(self):
        return self.__width

    def component(self, x, y):
        """Return entry (x, y)."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        # Fixed: original raised with the misleading "change_component: ..." text.
        raise Exception("component: indices out of bounds")

    def change_component(self, x, y, value):
        """Set entry (x, y) to `value`."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x, y):
        """Determinant of the submatrix with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        """Signed minor: (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        raise Exception("Indices out of bounds")

    def determinant(self):
        """Determinant by Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        if self.__height == 1:
            return self.__matrix[0][0]
        if self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        cofactors = [self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)]
        return sum(cofactors)


def square_zero_matrix(n):
    """Return the n x n zero matrix."""
    return Matrix([[0] * n for _ in range(n)], n, n)


def random_matrix(width, height, a, b):
    """Return a width x height matrix of random integers in [a, b] (seeded)."""
    random.seed(width)
    matrix = [[random.randint(a, b) for _ in range(width)] for _ in range(height)]
    return Matrix(matrix, width, height)
62
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowercase = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
43
# Kosaraju's algorithm for strongly connected components of a directed graph
# given as {vertex: [successor, ...]} with vertices numbered 0..n-1.
#
# Restored from an identifier-mangled fragment: all three functions shared
# one name and every function duplicated its parameter name (a SyntaxError).

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the unvisited vertices reachable from `vert`, in DFS
    finishing-time (post) order, marking them visited in place."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect the component containing `vert` by DFS over the reversed
    graph, marking vertices visited in place."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of `graph` (Kosaraju):
    one DFS pass for finishing order, then DFS on the reversed graph in
    decreasing finishing order."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]
    # Process vertices in reverse finishing order; each unvisited start
    # yields exactly one component.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
62
0
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of ``1 .. n`` in lexicographic order.

    (Fix: the mangled original declared duplicate parameter names — a
    SyntaxError — and the names used at the call sites below were never
    defined.)
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: extend ``current_list`` with values >= ``increment``.

    ``level`` counts how many elements are still needed; when it hits zero a
    snapshot of ``current_list`` is appended to ``total_list``.
    """
    if level == 0:
        total_list.append(current_list[:])  # copy: current_list is reused
        return

    # Upper bound leaves enough room for the remaining `level - 1` elements.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()  # backtrack


def print_all_state(total_list: list[list[int]]) -> None:
    """Print one combination per line, space-separated."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
130
# BartPho (syllable) tokenizer: SentencePiece BPE model plus a reduced
# monolingual vocabulary ("dict.txt") mapped onto fairseq-style ids.
#
# NOTE(review): identifier mangling broke this file.  Every module constant is
# assigned to the same name `_A` (each assignment clobbers the last), the class
# body reads names that are never defined here (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, A_, ...), and several `def`s declare the
# parameter `A_` more than once, which is a SyntaxError.  Comments below
# describe the evident intent; the code itself cannot run as-is.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

_A = logging.get_logger(__name__)

_A = '▁'  # SentencePiece word-boundary marker (originally SPIECE_UNDERLINE)

_A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

_A = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}

_A = {'vinai/bartpho-syllable': 1024}


class UpperCAmelCase__(A_):
    """BartPho syllable-level tokenizer (presumably subclasses
    PreTrainedTokenizer — the mangled base name `A_` is undefined here)."""

    UpperCAmelCase__: Any = VOCAB_FILES_NAMES
    UpperCAmelCase__: int = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__: str = ["input_ids", "attention_mask"]

    # NOTE(review): duplicate `A_` parameters below — SyntaxError.  The original
    # signature was (vocab_file, monolingual_vocab_file, bos/eos/sep/cls/unk/
    # pad/mask tokens, sp_model_kwargs, **kwargs).
    def __init__(self, A_, A_, A_="<s>", A_="</s>", A_="</s>", A_="<s>", A_="<unk>", A_="<pad>", A_="<mask>", A_=None, **A_) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCamelCase = AddedToken(A_, lstrip=A_, rstrip=A_) if isinstance(A_, A_) else mask_token
        __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=A_,
            eos_token=A_,
            unk_token=A_,
            sep_token=A_,
            cls_token=A_,
            pad_token=A_,
            mask_token=A_,
            sp_model_kwargs=self.sp_model_kwargs,
            **A_,
        )
        __UpperCamelCase = vocab_file
        __UpperCamelCase = monolingual_vocab_file
        # Load the full SentencePiece model from disk.
        __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(A_))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        __UpperCamelCase = {}
        __UpperCamelCase = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(A_) not in self.fairseq_tokens_to_ids:
                __UpperCamelCase = cnt
                cnt += 1
        # One token per line in dict.txt; first whitespace-separated field is the token.
        with open(A_, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                __UpperCamelCase = line.strip().split()[0]
                __UpperCamelCase = len(self.fairseq_tokens_to_ids)
        if str(A_) not in self.fairseq_tokens_to_ids:
            __UpperCamelCase = len(self.fairseq_tokens_to_ids)
        # Inverse mapping id -> token.
        __UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self) -> Any:
        # Drop the unpicklable SentencePiece processor; keep its serialized proto.
        __UpperCamelCase = self.__dict__.copy()
        __UpperCamelCase = None
        __UpperCamelCase = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, A_) -> List[str]:
        __UpperCamelCase = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            __UpperCamelCase = {}

        # Rebuild the processor from the serialized proto captured in __getstate__.
        __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    # build_inputs_with_special_tokens: <s> A </s> (</s> B </s>)
    # NOTE(review): all remaining methods were mangled to the same name `_a`,
    # so in a live class each definition would shadow the previous one.
    def _a(self, A_, A_=None) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __UpperCamelCase = [self.cls_token_id]
        __UpperCamelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # get_special_tokens_mask: 1 marks a special token, 0 a sequence token.
    def _a(self, A_, A_=None, A_=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_, token_ids_a=A_, already_has_special_tokens=A_
            )

        if token_ids_a is None:
            return [1] + ([0] * len(A_)) + [1]
        return [1] + ([0] * len(A_)) + [1, 1] + ([0] * len(A_)) + [1]

    # create_token_type_ids_from_sequences: BartPho uses all-zero segment ids.
    def _a(self, A_, A_=None) -> List[int]:
        __UpperCamelCase = [self.sep_token_id]
        __UpperCamelCase = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    @property
    def _a(self) -> Any:
        # vocab_size: size of the reduced (fairseq) vocabulary.
        return len(self.fairseq_ids_to_tokens)

    def _a(self) -> Union[str, Any]:
        # get_vocab: token -> id mapping including added tokens.
        __UpperCamelCase = {self.convert_ids_to_tokens(A_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _a(self, A_) -> List[str]:
        # _tokenize: delegate to SentencePiece.
        return self.sp_model.encode(A_, out_type=A_)

    def _a(self, A_) -> str:
        # _convert_token_to_id: unknown tokens map to unk_token_id.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _a(self, A_) -> int:
        # _convert_id_to_token
        return self.fairseq_ids_to_tokens[index]

    def _a(self, A_) -> List[Any]:
        # convert_tokens_to_string: strip the SentencePiece underline marker.
        __UpperCamelCase = ''.join(A_).replace(A_, ' ').strip()
        return out_string

    def _a(self, A_, A_=None) -> Tuple[str]:
        # save_vocabulary: write both the sp model and the monolingual dict.
        if not os.path.isdir(A_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        __UpperCamelCase = os.path.join(
            A_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        __UpperCamelCase = os.path.join(
            A_,
            (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(A_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, A_)
        elif not os.path.isfile(self.vocab_file):
            # No on-disk model file: dump the in-memory serialized proto instead.
            with open(A_, 'wb') as fi:
                __UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(A_)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            A_
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, A_)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(A_, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(A_)} \n')

        return out_vocab_file, out_monolingual_vocab_file
62
0
'''Near-duplicate detection/removal for a code dataset using MinHash + LSH.

NOTE(review): identifier mangling broke this file.  All module constants share
the name `a__`, all free functions share the name `lowercase__` (later defs
shadow earlier ones), bodies reference `SCREAMING_SNAKE_CASE__` where the real
parameter name was renamed away, and several defs/lambdas declare duplicate
parameter names (a SyntaxError).  Comments describe the evident intent; the
code cannot run as-is.
'''

import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm

# Token separator: anything that is not alphanumeric or underscore.
a__ : str = re.compile('''[^A-Za-z_0-9]''')

# parameters used in DuplicationIndex
a__ : Optional[Any] = 10   # minimum token count for a file to be hashed (MIN_NUM_TOKENS)
a__ : Tuple = 256          # MinHash permutations (NUM_PERM)


# get_min_hash: build a MinHash over the unique tokens, or None if too short.
def lowercase__(__lowercase: List[str]) -> Tuple:
    """simple docstring"""
    if len(SCREAMING_SNAKE_CASE__) < MIN_NUM_TOKENS:
        return None
    __UpperCamelCase = MinHash(num_perm=SCREAMING_SNAKE_CASE__)
    for token in set(SCREAMING_SNAKE_CASE__):
        min_hash.update(token.encode())
    return min_hash


# get_tokens: split text into non-empty alphanumeric tokens.
def lowercase__(__lowercase: str) -> Optional[int]:
    """simple docstring"""
    return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__) if len(t.strip()) > 0}


class snake_case:
    """DuplicationIndex: LSH index that groups near-duplicate files into
    clusters keyed by the first file seen in each cluster."""

    def __init__(self: Any, *, __A: str = 0.85,):
        __UpperCamelCase = duplication_jaccard_threshold
        __UpperCamelCase = NUM_PERM
        __UpperCamelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # cluster base key -> set of duplicate file keys
        __UpperCamelCase = defaultdict(A_)

    # add(code_key, min_hash): query the LSH for near matches, then insert.
    # NOTE(review): duplicate `__A` parameters — SyntaxError.
    def _lowerCamelCase(self: Tuple, __A: int, __A: Any):
        __UpperCamelCase = self._index.query(A_)
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''')
            return

        self._index.insert(A_, A_)
        if len(A_) > 0:
            # Attach to an existing cluster if any close match is a base key;
            # otherwise join the cluster of the first close match.
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(A_)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(A_)

    # get_duplicate_clusters: materialize clusters as lists of dicts.
    def _lowerCamelCase(self: Optional[Any]):
        __UpperCamelCase = []
        for base, duplicates in self._duplicate_clusters.items():
            __UpperCamelCase = [base] + list(A_)
            # reformat the cluster to be a list of dict
            __UpperCamelCase = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(A_)
        return duplicate_clusters

    # save: dump clusters to a JSON file.
    def _lowerCamelCase(self: str, __A: Union[str, Any]):
        __UpperCamelCase = self.get_duplicate_clusters()
        with open(A_, 'w') as f:
            json.dump(A_, A_)


# _compute_min_hash: worker mapping (index, row) -> (key triple, MinHash).
def lowercase__(__lowercase: Optional[int]) -> Optional[Any]:
    """simple docstring"""
    __UpperCamelCase, __UpperCamelCase = element
    __UpperCamelCase = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


# minhash_iter: fan the hashing out over a process pool, yield non-None results.
def lowercase__(__lowercase: Type[Dataset]) -> Optional[Any]:
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(SCREAMING_SNAKE_CASE__, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


# make_duplicate_clusters(dataset, jaccard_threshold)
# NOTE(review): duplicate `__lowercase` parameters — SyntaxError.
def lowercase__(__lowercase: Type[Dataset], __lowercase: float) -> List[Any]:
    """simple docstring"""
    __UpperCamelCase = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__)

    for filename, min_hash in tqdm(
        ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__)), max_queue_size=100)
    ):
        di.add(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


# jaccard_similarity: |A ∩ B| / |A ∪ B| over token sets.
def lowercase__(__lowercase: str, __lowercase: str) -> Union[str, Any]:
    """simple docstring"""
    __UpperCamelCase = get_tokens(SCREAMING_SNAKE_CASE__)
    __UpperCamelCase = get_tokens(SCREAMING_SNAKE_CASE__)
    return len(tokensa & tokensa) / len(tokensa | tokensa)


# Shared dataset handle for pool workers (set in find_extremes).
a__ : List[Any] = None


# _find_cluster_extremes_shared: greedy selection of cluster representatives.
def lowercase__(__lowercase: str, __lowercase: int) -> List[Any]:
    """simple docstring"""
    __UpperCamelCase = []
    for elementa in cluster:
        __UpperCamelCase = _shared_dataset[elementa['base_index']]['content']
        for elementa in extremes:
            __UpperCamelCase = _shared_dataset[elementa['base_index']]['content']
            if jaccard_similarity(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__) >= jaccard_threshold:
                # Close to an already-kept extreme: count it as a copy.
                elementa["copies"] += 1
                break
        else:
            # Not similar to any kept extreme: keep this element.
            __UpperCamelCase = 1
            extremes.append(SCREAMING_SNAKE_CASE__)
    return extremes


# find_extremes(cluster_list, dataset, jaccard_threshold): parallel wrapper.
def lowercase__(__lowercase: str, __lowercase: Optional[int], __lowercase: Any) -> Union[str, Any]:
    """simple docstring"""
    global _shared_dataset
    __UpperCamelCase = dataset  # shared via global so workers avoid pickling it
    __UpperCamelCase = []
    __UpperCamelCase = partial(_find_cluster_extremes_shared, jaccard_threshold=SCREAMING_SNAKE_CASE__)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                SCREAMING_SNAKE_CASE__,
                SCREAMING_SNAKE_CASE__,
            ),
            total=len(SCREAMING_SNAKE_CASE__),
        ):
            extremes_list.append(SCREAMING_SNAKE_CASE__)
    return extremes_list


# deduplicate_dataset: full pipeline — cluster, pick extremes, filter the rest.
def lowercase__(__lowercase: Type[Dataset], __lowercase: float = 0.8_5) -> List[Any]:
    """simple docstring"""
    __UpperCamelCase = make_duplicate_clusters(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
    __UpperCamelCase = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    __UpperCamelCase = {}
    __UpperCamelCase = find_extremes(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
    for extremes in extremes_clusters:
        for element in extremes:
            __UpperCamelCase = element
    # Remove every clustered file that was not selected as an extreme.
    __UpperCamelCase = duplicate_indices - set(extreme_dict.keys())
    __UpperCamelCase = dataset.filter(
        lambda __lowercase, __lowercase: idx not in remove_indices, with_indices=SCREAMING_SNAKE_CASE__
    )

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            __UpperCamelCase = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                __UpperCamelCase = extreme_dict[element['base_index']]['copies']

    print(f'''Original dataset size: {len(SCREAMING_SNAKE_CASE__)}''')
    print(f'''Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__)}''')
    print(f'''Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__)}''')
    print(f'''Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__)}''')
    print(f'''Filtered dataset size: {len(SCREAMING_SNAKE_CASE__)}''')

    return ds_filter, duplicate_clusters
53
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Return the normal-distribution probability density at ``x``.

    Works element-wise on NumPy arrays as well as on scalars.

    (Fix: the mangled original declared three parameters with the same name —
    a SyntaxError — while the body referenced `x`, `mu` and `sigma`.)

    >>> round(float(gaussian(0)), 10)
    0.3989422804
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
0
def multiplicative_persistence(num: int) -> int:
    """Return how many times ``num`` must be replaced by the product of its
    digits before a single digit remains.

    (Fix: the mangled original validated and iterated over an undefined name,
    raising NameError before doing any work.)

    >>> multiplicative_persistence(39)
    3
    >>> multiplicative_persistence(4)
    0
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        product = 1
        for digit in num_string:
            product *= int(digit)
        num_string = str(product)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times ``num`` must be replaced by the sum of its digits
    before a single digit remains.

    >>> additive_persistence(199)
    3
    >>> additive_persistence(9)
    0
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        total = sum(int(digit) for digit in num_string)
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
219
# CLIP-style image processor: resize (shortest edge) -> center crop -> rescale
# -> normalize with the OpenAI CLIP mean/std -> pack into a BatchFeature.
#
# NOTE(review): identifier mangling broke this file.  Every method except
# __init__ declares the parameter `A_` multiple times (a SyntaxError), all
# assignment targets collapsed to `__UpperCamelCase`, and bodies read the
# original parameter names (size, crop_size, do_resize, ...) that no longer
# exist.  Comments describe the evident intent; the code cannot run as-is.
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

_A = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class UpperCAmelCase__(A_):
    """CLIP image processor (presumably subclasses BaseImageProcessor — the
    mangled base name `A_` is undefined here)."""

    UpperCAmelCase__: Union[str, Any] = ["pixel_values"]

    # Original parameters: do_resize, size, resample, do_center_crop,
    # crop_size, do_rescale, rescale_factor, do_normalize, image_mean,
    # image_std, do_convert_rgb, **kwargs.
    def __init__(
        self,
        A_=True,
        A_=None,
        A_=PILImageResampling.BICUBIC,
        A_=True,
        A_=None,
        A_=True,
        A_=1 / 255,
        A_=True,
        A_=None,
        A_=None,
        A_=True,
        **A_,
    ) -> None:
        super().__init__(**A_)
        # Defaults: 224-px shortest edge resize, 224x224 center crop.
        __UpperCamelCase = size if size is not None else {'shortest_edge': 224}
        __UpperCamelCase = get_size_dict(A_, default_to_square=A_)
        __UpperCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        __UpperCamelCase = get_size_dict(A_, default_to_square=A_, param_name='crop_size')

        __UpperCamelCase = do_resize
        __UpperCamelCase = size
        __UpperCamelCase = resample
        __UpperCamelCase = do_center_crop
        __UpperCamelCase = crop_size
        __UpperCamelCase = do_rescale
        __UpperCamelCase = rescale_factor
        __UpperCamelCase = do_normalize
        __UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        __UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
        __UpperCamelCase = do_convert_rgb

    # resize: scale so the shortest edge matches size['shortest_edge'].
    def _a(self, A_, A_, A_=PILImageResampling.BICUBIC, A_=None, **A_,) -> np.ndarray:
        __UpperCamelCase = get_size_dict(A_, default_to_square=A_)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        __UpperCamelCase = get_resize_output_image_size(A_, size=size['shortest_edge'], default_to_square=A_)
        return resize(A_, size=A_, resample=A_, data_format=A_, **A_)

    # center_crop: crop to exactly size['height'] x size['width'].
    def _a(self, A_, A_, A_=None, **A_,) -> np.ndarray:
        __UpperCamelCase = get_size_dict(A_)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(A_, size=(size['height'], size['width']), data_format=A_, **A_)

    # rescale: multiply pixel values by a scalar (typically 1/255).
    def _a(self, A_, A_, A_=None, **A_,) -> Union[str, Any]:
        return rescale(A_, scale=A_, data_format=A_, **A_)

    # normalize: (image - mean) / std, channel-wise.
    def _a(self, A_, A_, A_, A_=None, **A_,) -> np.ndarray:
        return normalize(A_, mean=A_, std=A_, data_format=A_, **A_)

    # preprocess: full pipeline over one image or a list of images.
    def _a(
        self,
        A_,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=None,
        A_=ChannelDimension.FIRST,
        **A_,
    ) -> PIL.Image.Image:
        # Per-call arguments override the instance-level defaults.
        __UpperCamelCase = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase = size if size is not None else self.size
        __UpperCamelCase = get_size_dict(A_, param_name='size', default_to_square=A_)
        __UpperCamelCase = resample if resample is not None else self.resample
        __UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        __UpperCamelCase = crop_size if crop_size is not None else self.crop_size
        __UpperCamelCase = get_size_dict(A_, param_name='crop_size', default_to_square=A_)
        __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCamelCase = image_mean if image_mean is not None else self.image_mean
        __UpperCamelCase = image_std if image_std is not None else self.image_std
        __UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        __UpperCamelCase = make_list_of_images(A_)

        if not valid_images(A_):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        # Each enabled step requires its configuration to be present.
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __UpperCamelCase = [convert_to_rgb(A_) for image in images]

        # All transformations expect numpy arrays.
        __UpperCamelCase = [to_numpy_array(A_) for image in images]

        if do_resize:
            __UpperCamelCase = [self.resize(image=A_, size=A_, resample=A_) for image in images]

        if do_center_crop:
            __UpperCamelCase = [self.center_crop(image=A_, size=A_) for image in images]

        if do_rescale:
            __UpperCamelCase = [self.rescale(image=A_, scale=A_) for image in images]

        if do_normalize:
            __UpperCamelCase = [self.normalize(image=A_, mean=A_, std=A_) for image in images]

        __UpperCamelCase = [to_channel_dimension_format(A_, A_) for image in images]

        __UpperCamelCase = {'pixel_values': images}
        return BatchFeature(data=A_, tensor_type=A_)
62
0
'''Tests for the Kandinsky image-to-image diffusion pipeline: a fast CPU test
against tiny randomly-initialized components, plus a slow GPU integration test
against the released checkpoints.

NOTE(review): identifier mangling broke this file.  Both test classes are
named `A_` (the first even inherits from the then-undefined `A_`), every
method is named `UpperCAmelCase_` (later defs shadow earlier ones — originally
these were distinct properties like text_embedder_hidden_size, dummy_unet,
and methods get_dummy_components / get_dummy_inputs), one signature declares
`lowercase_` twice (a SyntaxError), and locals reference names (tokenizer,
model, seed, ...) that the mangling removed.  Comments describe the evident
intent; the code cannot run as-is.
'''

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


class A_(A_, unittest.TestCase):
    '''Fast pipeline test using tiny dummy components on CPU.'''

    UpperCAmelCase_: int = KandinskyImgaImgPipeline
    # Required / optional call-argument names the pipeline tester checks.
    UpperCAmelCase_: List[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    UpperCAmelCase_: str = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    UpperCAmelCase_: Optional[int] = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    UpperCAmelCase_: Optional[Any] = False

    # Tiny model dimensions used by the dummy components below.
    @property
    def UpperCAmelCase_(self: List[Any]) -> Optional[Any]:
        return 32

    @property
    def UpperCAmelCase_(self: Tuple) -> Dict:
        return 32

    @property
    def UpperCAmelCase_(self: List[Any]) -> List[str]:
        return self.time_input_dim

    @property
    def UpperCAmelCase_(self: int) -> Union[str, Any]:
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase_(self: Optional[Any]) -> List[str]:
        return 100

    # dummy_tokenizer: tiny pretrained tokenizer from the hub.
    @property
    def UpperCAmelCase_(self: List[str]) -> str:
        UpperCAmelCase: Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    # dummy_text_encoder: tiny multilingual CLIP text encoder, seeded for
    # reproducibility.
    @property
    def UpperCAmelCase_(self: List[Any]) -> int:
        torch.manual_seed(0)
        UpperCAmelCase: Union[str, Any] = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1_005,
        )
        UpperCAmelCase: List[str] = MultilingualCLIP(A_)
        UpperCAmelCase: Optional[Any] = text_encoder.eval()
        return text_encoder

    # dummy_unet: tiny conditional UNet.
    @property
    def UpperCAmelCase_(self: Optional[int]) -> int:
        torch.manual_seed(0)
        UpperCAmelCase: Optional[int] = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        UpperCAmelCase: Tuple = UNetaDConditionModel(**A_)
        return model

    # dummy_movq_kwargs: config for the tiny VQ decoder.
    @property
    def UpperCAmelCase_(self: Tuple) -> Any:
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCAmelCase_(self: int) -> int:
        torch.manual_seed(0)
        UpperCAmelCase: Dict = VQModel(**self.dummy_movq_kwargs)
        return model

    # get_dummy_components: assemble all pipeline components + scheduler.
    def UpperCAmelCase_(self: List[str]) -> Any:
        UpperCAmelCase: List[str] = self.dummy_text_encoder
        UpperCAmelCase: Optional[int] = self.dummy_tokenizer
        UpperCAmelCase: List[Any] = self.dummy_unet
        UpperCAmelCase: Union[str, Any] = self.dummy_movq

        UpperCAmelCase: str = {
            'num_train_timesteps': 1_000,
            'beta_schedule': 'linear',
            'beta_start': 0.0_0085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        UpperCAmelCase: List[str] = DDIMScheduler(**A_)

        UpperCAmelCase: List[str] = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    # get_dummy_inputs(device, seed=0): deterministic embeds + init image.
    # NOTE(review): duplicate `lowercase_` parameters — SyntaxError.
    def UpperCAmelCase_(self: Tuple, lowercase_: Any, lowercase_: List[str] = 0) -> int:
        UpperCAmelCase: int = floats_tensor((1, self.cross_attention_dim), rng=random.Random(A_)).to(A_)
        UpperCAmelCase: Dict = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(A_)
        # create init_image
        UpperCAmelCase: Optional[Any] = floats_tensor((1, 3, 64, 64), rng=random.Random(A_)).to(A_)
        UpperCAmelCase: List[Any] = image.cpu().permute(0, 2, 3, 1)[0]
        UpperCAmelCase: Dict = Image.fromarray(np.uinta(A_)).convert('RGB').resize((256, 256))

        # MPS does not support device-bound generators.
        if str(A_).startswith('mps'):
            UpperCAmelCase: List[str] = torch.manual_seed(A_)
        else:
            UpperCAmelCase: List[str] = torch.Generator(device=A_).manual_seed(A_)
        UpperCAmelCase: List[Any] = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    # test_kandinsky_img2img: run the tiny pipeline and compare a corner slice
    # of the output against recorded reference values.
    def UpperCAmelCase_(self: Optional[int]) -> Dict:
        UpperCAmelCase: List[Any] = 'cpu'

        UpperCAmelCase: Any = self.get_dummy_components()

        UpperCAmelCase: int = self.pipeline_class(**A_)
        UpperCAmelCase: Tuple = pipe.to(A_)

        pipe.set_progress_bar_config(disable=A_)

        UpperCAmelCase: Tuple = pipe(**self.get_dummy_inputs(A_))
        UpperCAmelCase: Union[str, Any] = output.images

        # Same call with return_dict=False must yield the same images.
        UpperCAmelCase: Union[str, Any] = pipe(
            **self.get_dummy_inputs(A_),
            return_dict=A_,
        )[0]

        UpperCAmelCase: List[Any] = image[0, -3:, -3:, -1]
        UpperCAmelCase: Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        UpperCAmelCase: Any = np.array(
            [0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""


@slow
@require_torch_gpu
class A_(unittest.TestCase):
    '''Slow GPU integration test against the released Kandinsky checkpoints.'''

    def UpperCAmelCase_(self: Optional[int]) -> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_(self: List[str]) -> List[str]:
        # Reference output and input image hosted on the hub.
        UpperCAmelCase: Union[str, Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy'
        )

        UpperCAmelCase: int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png'
        )
        UpperCAmelCase: List[Any] = 'A red cartoon frog, 4k'

        UpperCAmelCase: Tuple = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa
        )
        pipe_prior.to(A_)

        UpperCAmelCase: str = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.floataa
        )
        UpperCAmelCase: Tuple = pipeline.to(A_)

        pipeline.set_progress_bar_config(disable=A_)

        UpperCAmelCase: List[str] = torch.Generator(device='cpu').manual_seed(0)

        # Prior produces the image embeddings consumed by the img2img pipeline.
        UpperCAmelCase, UpperCAmelCase: str = pipe_prior(
            A_,
            generator=A_,
            num_inference_steps=5,
            negative_prompt='',
        ).to_tuple()

        UpperCAmelCase: Optional[int] = pipeline(
            A_,
            image=A_,
            image_embeds=A_,
            negative_image_embeds=A_,
            generator=A_,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type='np',
        )

        UpperCAmelCase: List[str] = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(A_, A_)
151
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "yolos" def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-12 , A_=[512, 864] , A_=16 , A_=3 , A_=True , A_=100 , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=5 , A_=2 , A_=0.1 , **A_ , ) -> Any: super().__init__(**A_ ) __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_act __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =initializer_range __UpperCamelCase =layer_norm_eps __UpperCamelCase =image_size __UpperCamelCase =patch_size __UpperCamelCase =num_channels __UpperCamelCase =qkv_bias __UpperCamelCase =num_detection_tokens __UpperCamelCase =use_mid_position_embeddings __UpperCamelCase =auxiliary_loss # Hungarian matcher __UpperCamelCase =class_cost __UpperCamelCase =bbox_cost __UpperCamelCase =giou_cost # Loss coefficients __UpperCamelCase =bbox_loss_coefficient __UpperCamelCase =giou_loss_coefficient __UpperCamelCase =eos_coefficient class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : str = version.parse("1.11" ) @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _a ( self ) -> float: return 1E-4 @property def _a ( self ) -> int: return 12
62
0
'''simple docstring''' import cva import numpy as np class _lowercase : '''simple docstring''' def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: if k in (0.0_4, 0.0_6): __lowerCAmelCase = k __lowerCAmelCase = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : List[Any] ) -> str: return str(self.k ) def a ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> tuple[cva.Mat, list[list[int]]]: __lowerCAmelCase = cva.imread(A_ , 0 ) __lowerCAmelCase , __lowerCAmelCase = img.shape __lowerCAmelCase = [] __lowerCAmelCase = img.copy() __lowerCAmelCase = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB ) __lowerCAmelCase , __lowerCAmelCase = np.gradient(A_ ) __lowerCAmelCase = dx**2 __lowerCAmelCase = dy**2 __lowerCAmelCase = dx * dy __lowerCAmelCase = 0.0_4 __lowerCAmelCase = self.window_size // 2 for y in range(A_ , h - offset ): for x in range(A_ , w - offset ): __lowerCAmelCase = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowerCAmelCase = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowerCAmelCase = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowerCAmelCase = (wxx * wyy) - (wxy**2) __lowerCAmelCase = wxx + wyy __lowerCAmelCase = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_55 ) return color_img, corner_list if __name__ == "__main__": _A : str = HarrisCorner(0.0_4, 3) _A , _A : int = edge_detect.detect('''path_to_image''') cva.imwrite('''detect.png''', color_img)
229
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _A = { 'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ['VivitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'VivitModel', 'VivitPreTrainedModel', 'VivitForVideoClassification', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class A_ ( A_ ): _lowerCamelCase : Union[str, Any] = ["pixel_values"] def __init__( self : Tuple , snake_case_ : str = True , snake_case_ : List[str] = None , snake_case_ : Union[str, Any] = PILImageResampling.BICUBIC , snake_case_ : Dict = True , snake_case_ : Optional[int] = None , snake_case_ : Any = True , snake_case_ : str = 1 / 2_5_5 , snake_case_ : str = True , snake_case_ : Optional[Any] = None , snake_case_ : Union[str, Any] = None , snake_case_ : Union[str, Any] = True , **snake_case_ : List[str] , ): super().__init__(**A_ ) _UpperCAmelCase = size if size is not None else {"shortest_edge": 2_2_4} _UpperCAmelCase = get_size_dict(A_ , default_to_square=A_ ) _UpperCAmelCase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} _UpperCAmelCase = get_size_dict(A_ , default_to_square=A_ , param_name="crop_size" ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD _UpperCAmelCase = do_convert_rgb def lowercase ( self : Tuple , snake_case_ : Tuple , snake_case_ : 
Optional[Any] , snake_case_ : Dict = PILImageResampling.BICUBIC , snake_case_ : List[Any] = None , **snake_case_ : List[str] , ): _UpperCAmelCase = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _UpperCAmelCase = get_resize_output_image_size(A_ , size=size["shortest_edge"] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def lowercase ( self : int , snake_case_ : int , snake_case_ : int , snake_case_ : str = None , **snake_case_ : Tuple , ): _UpperCAmelCase = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' ) return center_crop(A_ , size=(size["height"], size["width"]) , data_format=A_ , **A_ ) def lowercase ( self : Any , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : List[Any] = None , **snake_case_ : Optional[int] , ): return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def lowercase ( self : int , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] = None , **snake_case_ : Tuple , ): return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def lowercase ( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] = None , snake_case_ : int = None , snake_case_ : str = None , snake_case_ : Tuple = None , snake_case_ : Tuple = None , snake_case_ : Any = None , snake_case_ : Dict = None , snake_case_ : Any = None , snake_case_ : Any = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , snake_case_ : int = None , snake_case_ : List[str] = ChannelDimension.FIRST , **snake_case_ : int , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(A_ , 
param_name="size" , default_to_square=A_ ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(A_ , param_name="crop_size" , default_to_square=A_ ) _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _UpperCAmelCase = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: _UpperCAmelCase = [convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. 
_UpperCAmelCase = [to_numpy_array(A_ ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(A_ , A_ ) for image in images] _UpperCAmelCase = {"pixel_values": images} return BatchFeature(data=A_ , tensor_type=A_ )
22
from __future__ import annotations import math class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ ) -> None: __UpperCamelCase =size # approximate the overall size of segment tree with given value __UpperCamelCase =[0 for i in range(0 , 4 * size )] # create array to store lazy update __UpperCamelCase =[0 for i in range(0 , 4 * size )] __UpperCamelCase =[0 for i in range(0 , 4 * size )] # flag for lazy update def _a ( self , A_ ) -> int: return idx * 2 def _a ( self , A_ ) -> int: return idx * 2 + 1 def _a ( self , A_ , A_ , A_ , A_ ) -> None: if left_element == right_element: __UpperCamelCase =a[left_element - 1] else: __UpperCamelCase =(left_element + right_element) // 2 self.build(self.left(A_ ) , A_ , A_ , A_ ) self.build(self.right(A_ ) , mid + 1 , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> bool: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __UpperCamelCase =val if left_element != right_element: __UpperCamelCase =val __UpperCamelCase =val __UpperCamelCase =True __UpperCamelCase =True return True __UpperCamelCase =(left_element + right_element) // 2 self.update(self.left(A_ ) , A_ , A_ , A_ , A_ , A_ ) self.update(self.right(A_ ) , mid + 1 , A_ , A_ , A_ , A_ ) __UpperCamelCase =max( self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] ) return True def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int | float: if self.flag[idx] is True: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =False if left_element != right_element: __UpperCamelCase =self.lazy[idx] __UpperCamelCase =self.lazy[idx] __UpperCamelCase =True __UpperCamelCase =True if 
right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __UpperCamelCase =(left_element + right_element) // 2 __UpperCamelCase =self.query(self.left(A_ ) , A_ , A_ , A_ , A_ ) __UpperCamelCase =self.query(self.right(A_ ) , mid + 1 , A_ , A_ , A_ ) return max(A_ , A_ ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A_ , A_ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _A = 15 _A = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
62
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowerCamelCase__( A_): UpperCAmelCase__ : Union[str, Any] = "beit" def __init__( self: Tuple , UpperCamelCase_: Dict=81_92 , UpperCamelCase_: Optional[Any]=7_68 , UpperCamelCase_: Dict=12 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: List[str]=30_72 , UpperCamelCase_: str="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Dict=0.0 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: List[Any]=2_24 , UpperCamelCase_: Any=16 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: Any=False , UpperCamelCase_: int=False , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Tuple=[3, 5, 7, 11] , UpperCamelCase_: Optional[Any]=[1, 2, 3, 6] , UpperCamelCase_: int=True , UpperCamelCase_: str=0.4 , UpperCamelCase_: str=2_56 , UpperCamelCase_: List[str]=1 , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: Dict=2_55 , **UpperCamelCase_: Any , ): super().__init__(**A_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = image_size __lowerCamelCase = patch_size 
__lowerCamelCase = num_channels __lowerCamelCase = use_mask_token __lowerCamelCase = use_absolute_position_embeddings __lowerCamelCase = use_relative_position_bias __lowerCamelCase = use_shared_relative_position_bias __lowerCamelCase = layer_scale_init_value __lowerCamelCase = drop_path_rate __lowerCamelCase = use_mean_pooling # decode head attributes (semantic segmentation) __lowerCamelCase = out_indices __lowerCamelCase = pool_scales # auxiliary head attributes (semantic segmentation) __lowerCamelCase = use_auxiliary_head __lowerCamelCase = auxiliary_loss_weight __lowerCamelCase = auxiliary_channels __lowerCamelCase = auxiliary_num_convs __lowerCamelCase = auxiliary_concat_input __lowerCamelCase = semantic_loss_ignore_index class lowerCamelCase__( A_): UpperCAmelCase__ : Tuple = version.parse('1.11') @property def lowerCAmelCase__ ( self: List[str] ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase__ ( self: Optional[Any] ): return 1E-4
12
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ): __UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250' __UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' ) __UpperCamelCase =soup.find_all('td' , attrs='titleColumn' ) __UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) } def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ): __UpperCamelCase =get_imdb_top_aaa_movies() with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file: __UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ ) writer.writerow(['Movie title', 'IMDb rating'] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
0
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCAmelCase : str = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCAmelCase : Any = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCAmelCase : Optional[int] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ): """simple docstring""" a__ : Dict =len([g for position, g in enumerate(SCREAMING_SNAKE_CASE__ ) if g == main_target[position]] ) return (item, float(SCREAMING_SNAKE_CASE__ )) def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ): """simple docstring""" a__ : Dict =random.randint(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ) a__ : Tuple =parent_a[:random_slice] + parent_a[random_slice:] a__ : str =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : list[str] ): """simple docstring""" a__ : str =list(SCREAMING_SNAKE_CASE__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: a__ : Union[str, Any] =random.choice(SCREAMING_SNAKE_CASE__ ) return "".join(SCREAMING_SNAKE_CASE__ ) def _A ( SCREAMING_SNAKE_CASE : tuple[str, float] , SCREAMING_SNAKE_CASE : list[tuple[str, float]] , SCREAMING_SNAKE_CASE : list[str] , ): """simple docstring""" a__ : int =[] # Generate more children proportionally to the fitness score. 
a__ : Union[str, Any] =int(parent_a[1] * 100 ) + 1 a__ : Any =10 if child_n >= 10 else child_n for _ in range(SCREAMING_SNAKE_CASE__ ): a__ : Tuple =population_score[random.randint(0 , SCREAMING_SNAKE_CASE__ )][0] a__ , a__ : int =crossover(parent_a[0] , SCREAMING_SNAKE_CASE__ ) # Append new string to the population list. pop.append(mutate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) pop.append(mutate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) return pop def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : list[str] , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" if N_POPULATION < N_SELECTED: a__ : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(SCREAMING_SNAKE_CASE__ ) # Verify that the target contains no genes besides the ones inside genes variable. a__ : int =sorted({c for c in target if c not in genes} ) if not_in_genes_list: a__ : int =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(SCREAMING_SNAKE_CASE__ ) # Generate random starting population. a__ : str =[] for _ in range(SCREAMING_SNAKE_CASE__ ): population.append("".join([random.choice(SCREAMING_SNAKE_CASE__ ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] ) ) # Just some logs to know what the algorithms is doing. a__ , a__ : Any =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(SCREAMING_SNAKE_CASE__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. 
# We just need to call evaluate for every item inside the population. a__ : Union[str, Any] =[evaluate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for item in population] # Check if there is a matching evolution. a__ : List[str] =sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE : x[1] , reverse=SCREAMING_SNAKE_CASE__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. a__ : Optional[Any] =population[: int(N_POPULATION / 3 )] population.clear() population.extend(SCREAMING_SNAKE_CASE__ ) # Normalize population score to be between 0 and 1. a__ : Any =[ (item, score / len(SCREAMING_SNAKE_CASE__ )) for item, score in population_score ] # This is selection for i in range(SCREAMING_SNAKE_CASE__ ): population.extend(select(population_score[int(SCREAMING_SNAKE_CASE__ )] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(SCREAMING_SNAKE_CASE__ ) > N_POPULATION: break if __name__ == "__main__": UpperCAmelCase : Optional[int] = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCAmelCase : int = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\""" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
95
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A = logging.get_logger(__name__) _A = { 'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json', } class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip_vision_model" def __init__( self , A_=1408 , A_=6144 , A_=39 , A_=16 , A_=224 , A_=14 , A_="gelu" , A_=1E-6 , A_=0.0 , A_=1E-10 , A_=True , **A_ , ) -> Tuple: super().__init__(**A_ ) __UpperCamelCase =hidden_size __UpperCamelCase =intermediate_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =patch_size __UpperCamelCase =image_size __UpperCamelCase =initializer_range __UpperCamelCase =attention_dropout __UpperCamelCase =layer_norm_eps __UpperCamelCase =hidden_act __UpperCamelCase =qkv_bias @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "instructblip_qformer" def __init__( self , A_=30522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , A_=1E-12 , A_=0 , A_="absolute" , A_=2 , A_=1408 , **A_ , ) -> Optional[Any]: super().__init__(pad_token_id=A_ , **A_ ) __UpperCamelCase =vocab_size __UpperCamelCase =hidden_size __UpperCamelCase =num_hidden_layers __UpperCamelCase =num_attention_heads __UpperCamelCase =hidden_act __UpperCamelCase =intermediate_size __UpperCamelCase =hidden_dropout_prob __UpperCamelCase =attention_probs_dropout_prob __UpperCamelCase =max_position_embeddings __UpperCamelCase =initializer_range __UpperCamelCase =layer_norm_eps __UpperCamelCase =position_embedding_type __UpperCamelCase =cross_attention_frequency __UpperCamelCase =encoder_hidden_size @classmethod def _a ( cls , A_ , **A_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(A_ ) __UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCamelCase =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A_ , **A_ ) class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Optional[int] = "instructblip" UpperCAmelCase__ : Optional[Any] = True def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ) -> List[str]: super().__init__(**A_ ) if vision_config is None: __UpperCamelCase ={} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' 
) if qformer_config is None: __UpperCamelCase ={} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __UpperCamelCase ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __UpperCamelCase =InstructBlipVisionConfig(**A_ ) __UpperCamelCase =InstructBlipQFormerConfig(**A_ ) __UpperCamelCase =text_config['model_type'] if 'model_type' in text_config else 'opt' __UpperCamelCase =CONFIG_MAPPING[text_model_type](**A_ ) __UpperCamelCase =self.text_config.tie_word_embeddings __UpperCamelCase =self.text_config.is_encoder_decoder __UpperCamelCase =num_query_tokens __UpperCamelCase =self.vision_config.hidden_size __UpperCamelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __UpperCamelCase =1.0 __UpperCamelCase =0.02 @classmethod def _a ( cls , A_ , A_ , A_ , **A_ , ) -> Optional[Any]: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , ) def _a ( self ) -> Optional[Any]: __UpperCamelCase =copy.deepcopy(self.__dict__ ) __UpperCamelCase =self.vision_config.to_dict() __UpperCamelCase =self.qformer_config.to_dict() __UpperCamelCase =self.text_config.to_dict() __UpperCamelCase =self.__class__.model_type return output
62
0
import argparse import os import re _a = "src/transformers/models/auto" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict _a = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict") # re pattern that matches identifiers in mappings _a = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"") def lowerCAmelCase__(__snake_case ,__snake_case = False ) -> int: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ ,'''r''' ,encoding='''utf-8''' ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = content.split('''\n''' ) lowerCamelCase__ = [] lowerCamelCase__ = 0 while line_idx < len(SCREAMING_SNAKE_CASE__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCamelCase__ = len(re.search(R'''^(\s*)\S''' ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCamelCase__ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCamelCase__ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCamelCase__ = sorted(SCREAMING_SNAKE_CASE__ ,key=lambda __snake_case : _re_identifier.search(SCREAMING_SNAKE_CASE__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(SCREAMING_SNAKE_CASE__ ,'''w''' ,encoding='''utf-8''' ) as f: f.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) elif "\n".join(SCREAMING_SNAKE_CASE__ ) != content: return True def lowerCAmelCase__(__snake_case = False ) -> Tuple: '''simple docstring''' lowerCamelCase__ = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for f in os.listdir(SCREAMING_SNAKE_CASE__ ) if 
f.endswith('''.py''' )] lowerCamelCase__ = [sort_auto_mapping(SCREAMING_SNAKE_CASE__ ,overwrite=SCREAMING_SNAKE_CASE__ ) for fname in fnames] if not overwrite and any(SCREAMING_SNAKE_CASE__ ): lowerCamelCase__ = [f for f, d in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) if d] raise ValueError( F'The following files have auto mappings that need sorting: {", ".join(SCREAMING_SNAKE_CASE__ )}. Run `make style` to fix' ''' this.''' ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") _a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
209
import collections
import gzip
import os
# Explicit submodule import: bare `import urllib` does not guarantee that
# `urllib.request` (used in _maybe_download) is bound.
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read 4 bytes from `bytestream` as a big-endian unsigned 32-bit integer."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract images from an IDX gzip file into a uint8 array [index, y, x, 1].

    Raises:
        ValueError: if the file's magic number is not 2051 (IDX image file).
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Set exactly one 1 per row, at column labels_dense[i].
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels from an IDX gzip file into a 1D uint8 array.

    Raises:
        ValueError: if the file's magic number is not 2049 (IDX label file).
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for MNIST images/labels with epoch-aware batch iteration."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be uint8 (leave pixels in [0, 255]) or float32 (rescale
        into [0, 1]). `seed` fixes shuffling for reproducible epochs.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples, reshuffling at epoch ends."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` if absent.

    Returns:
        The local path of the (possibly pre-existing) file.
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST train/validation/test splits.

    Returns:
        A `_Datasets` namedtuple of three `_DataSet` instances.

    Raises:
        ValueError: if `validation_size` is outside [0, len(train_images)].
    """
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
62
0
import fire from utils import calculate_rouge, save_json def _a ( lowerCamelCase: int , lowerCamelCase: Union[str, Any] , lowerCamelCase: List[Any]=None , **lowerCamelCase: List[Any] ) -> Dict: '''simple docstring''' __A = [x.strip() for x in open(SCREAMING_SNAKE_CASE__ ).readlines()] __A = [x.strip() for x in open(SCREAMING_SNAKE_CASE__ ).readlines()][: len(SCREAMING_SNAKE_CASE__ )] __A = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if save_path is not None: save_json(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
117
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the (slow, word-level) Transfo-XL tokenizer."""

    tokenizer_class = TransfoXLTokenizer
    # Transfo-XL has no Rust tokenizer and is not a seq2seq model.
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        """Write a tiny vocabulary file the tokenizer tests can load."""
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Return a lower-casing tokenizer loaded from the temp vocab."""
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input/expected-output pair used by the common round-trip tests."""
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        # Moses-style tokenization wraps in-number punctuation with @ markers.
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
62
0
from __future__ import annotations

import math


class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates.

    Nodes are 1-indexed (node 1 is the root); array positions passed to
    `build`/`update`/`query` are 1-indexed inclusive ranges.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        # 4 * size safely over-approximates the number of nodes needed.
        self.segment_tree = [0 for _ in range(0, 4 * size)]
        # Pending assignment value per node, applied lazily.
        self.lazy = [0 for _ in range(0, 4 * size)]
        # flag[i] is True when lazy[i] holds a pending update for node i.
        self.flag = [0 for _ in range(0, 4 * size)]

    def left(self, idx: int) -> int:
        """Index of the left child of node `idx`."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of node `idx`."""
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        """Build the subtree at `idx` covering a[left_element-1 .. right_element-1]."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b]; always returns True."""
        if self.flag[idx] is True:
            # Apply the pending assignment here and push it to the children.
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            # Node fully covered: assign here, defer children via lazy arrays.
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True

        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the max over positions [a, b]; -inf when the ranges are disjoint."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]

        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        # Querying each single position forces all lazy updates to resolve.
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
43
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure; optional backends append their keys below.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
0
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)


# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by `key`.

    Raises:
        ValueError: when the destination tensor's shape does not match.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    """Return True when `name` matches any ignore pattern (supports `.*` wildcards)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every tensor of the original state dict onto `hf_model`.

    Raises:
        ValueError: for an unsupported `model_name`.
    """
    unused_weights = []

    # NOTE: the original script used `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy; a membership test expresses the intended check.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """Convert an original EnCodec checkpoint to a HF EncodecModel on disk.

    Args:
        model_name: One of "encodec_24khz", "encodec_32khz", "encodec_48khz".
        checkpoint_path: Path of the original (.th / .bin) checkpoint.
        pytorch_dump_folder_path: Output folder for model + feature extractor.
        config_path: Optional path to a pre-built config.json.
        repo_id: Optional hub repo to push the converted artifacts to.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
130
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Count how many rows of ``out`` argmax to the matching entry of ``labels``."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Read a ROCStories CSV and return tuples (story, cont1, cont2, 0-based label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the header line
        for line in tqdm(f):
            # Columns 1-4 are the story sentences, 5/6 the two continuations,
            # last column the 1-based index of the correct continuation.
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Convert encoded (story, cont1, cont2, label) tuples into model tensors.

    For each example and each of the 2 continuations, builds
    ``[start] story [delimiter] continuation [clf]`` (each piece capped at
    ``cap_length`` tokens) and right-pads to ``input_len``.  Returns one tuple
    of tensors ``(input_ids, mc_token_ids, lm_labels, mc_labels)`` per dataset.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is the ignore_index for the LM loss, so padding is not trained on.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # The classification head reads the hidden state at the [clf] token.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Fine-tune / evaluate OpenAI GPT with a double-heads model on ROCStories."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model.
    # This loading function also adds new tokens and embeddings called `special tokens`;
    # these new embeddings will be fine-tuned on the RocStories dataset.
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize strings; pass ints through; descend into containers."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # max size of input for the pre-trained model

    # Prepare input tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Combine the LM loss (weighted by lm_coef) with the multiple-choice loss.
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
62
0
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowercase__ ( __lowercase : NDArray[floataa] , __lowercase : NDArray[floataa] , __lowercase : list[int] , __lowercase : int , ) -> List[str]: """simple docstring""" __UpperCamelCase , __UpperCamelCase = coefficient_matrix.shape __UpperCamelCase , __UpperCamelCase = constant_matrix.shape if rowsa != colsa: __UpperCamelCase = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(SCREAMING_SNAKE_CASE__ ) if colsa != 1: __UpperCamelCase = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(SCREAMING_SNAKE_CASE__ ) if rowsa != rowsa: __UpperCamelCase = ( 'Coefficient and constant matrices dimensions must be nxn and nx1 but ' F'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) != rowsa: __UpperCamelCase = ( 'Number of initial values must be equal to number of rows in coefficient ' F'''matrix but received {len(SCREAMING_SNAKE_CASE__ )} and {rowsa}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) if iterations <= 0: raise ValueError('Iterations must be at least 1' ) __UpperCamelCase = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) __UpperCamelCase , __UpperCamelCase = table.shape strictly_diagonally_dominant(SCREAMING_SNAKE_CASE__ ) # Iterates the whole matrix for given number of times for _ in range(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase = [] for row in range(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase = 0 for col in range(SCREAMING_SNAKE_CASE__ ): if col == row: __UpperCamelCase = table[row][col] elif col == cols - 1: __UpperCamelCase = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] __UpperCamelCase = (temp + val) / denom new_val.append(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase = new_val return [float(SCREAMING_SNAKE_CASE__ ) for i in new_val] def 
lowercase__ ( __lowercase : NDArray[floataa] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase , __UpperCamelCase = table.shape __UpperCamelCase = True for i in range(0 , SCREAMING_SNAKE_CASE__ ): __UpperCamelCase = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('Coefficient matrix is not strictly diagonally dominant' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
53
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ): __UpperCamelCase =1 __UpperCamelCase =0 __UpperCamelCase =1 __UpperCamelCase =1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"""{solution() = }""")
62
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration class for the YOLOS object-detection model.

    Stores the ViT backbone hyperparameters plus detection-specific settings
    (detection tokens, Hungarian matcher costs, loss coefficients).  Defaults
    mirror the values visible in the original signature.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # ViT encoder hyperparameters
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image / patch embedding settings (image_size is [height, width])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Detection-specific settings
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single pixel_values input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
219
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _A = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMAEForPreTraining', 'ViTMAELayer', 'ViTMAEModel', 'ViTMAEPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
0