"""Convert a metaseq OPT checkpoint into the Hugging Face Transformers format."""
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq state dict; the checkpoint path should end in model.pt."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split the fused QKV projection into separate Q, K, V weights
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused QKV weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load the metaseq weights into a half-precision OPTModel and save it."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
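

# A standalone sketch (not part of the original script) of the fused-QKV split done
# in `load_checkpoint` above: metaseq stores the fused projection in K, V, Q row
# order, so splitting along dim 0 and reassigning in that order recovers the three
# matrices. The sizes here are toy values chosen for illustration.
def _qkv_split_demo():
    hidden_size = 4
    fused_qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    k, v, q = torch.split(fused_qkv, fused_qkv.shape[0] // 3, dim=0)
    # the split is lossless: concatenating in the same order reproduces the fused weight
    assert torch.equal(torch.cat([k, v, q], dim=0), fused_qkv)
    return q, k, v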
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = ['audio_values', 'audio_mask']
def __init__( self : Optional[int] , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Optional[Any]=[16, 16] , lowerCAmelCase : Optional[Any]=128 , lowerCAmelCase : Union[str, Any]=4_4100 , lowerCAmelCase : Any=86 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : List[str]=0.0 , **lowerCAmelCase : Any , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
lowerCAmelCase = spectrogram_length
lowerCAmelCase = num_channels
lowerCAmelCase = patch_size
lowerCAmelCase = feature_size // self.patch_size[1]
lowerCAmelCase = n_fft
lowerCAmelCase = sampling_rate // hop_length_to_sampling_rate
lowerCAmelCase = sampling_rate
lowerCAmelCase = padding_value
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __lowercase ( self : int , lowerCAmelCase : np.array ):
lowerCAmelCase = spectrogram(
lowerCAmelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = log_spec - 20.0
lowerCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , **lowerCAmelCase : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase = is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
lowerCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCAmelCase = np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowerCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCAmelCase = np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCAmelCase = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
lowerCAmelCase = audio_features[i]
lowerCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
lowerCAmelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowerCAmelCase = {"""audio_values""": padded_audio_features}
lowerCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
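

# A minimal usage sketch (not from the original file), assuming one second of mono
# audio at the extractor's default 44.1 kHz rate; the random waveform is a stand-in
# for real audio.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    extractor = TvltFeatureExtractor()
    waveform = rng.standard_normal(44100).astype(np.float32)
    inputs = extractor(waveform, sampling_rate=44100, return_attention_mask=True)
    # audio_values: (batch, channels, time, 128 mel bins); audio_mask marks the real patches
    print(inputs["audio_values"].shape, inputs["audio_mask"].shape)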
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
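

# A short usage sketch (not from the original file): instantiate the config with its
# defaults, override one deformable-attention knob, and read back a few attributes.
# The printed values are the defaults defined above.
if __name__ == "__main__":
    config = DetaConfig(num_feature_levels=4)
    print(config.d_model, config.num_queries, config.two_stage)  # 256 900 True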
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """A fixed-capacity FIFO queue backed by a circular doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring: the last node links back to the front
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
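

# A quick usage sketch (not from the original file) of the fixed-capacity queue above:
# items come back out in FIFO order, and the ring never grows past its capacity.
if __name__ == "__main__":
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"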
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """spiece.model"""}
__lowercase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
__lowercase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
__lowercase = 0
__lowercase = 1
__lowercase = 2
__lowercase = 3
__lowercase = 4
class _A ( __snake_case ):
"""simple docstring"""
UpperCAmelCase : str = VOCAB_FILES_NAMES
UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Optional[Any] = """left"""
def __init__( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int=False , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : Tuple="</s>" , __UpperCAmelCase : Union[str, Any]="<unk>" , __UpperCAmelCase : int="<sep>" , __UpperCAmelCase : Any="<pad>" , __UpperCAmelCase : List[str]="<cls>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Optional[int]=["<eop>", "<eod>"] , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
a : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token
a : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
a : str = 3
a : Union[str, Any] = do_lower_case
a : str = remove_space
a : List[str] = keep_accents
a : Dict = vocab_file
a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(__UpperCAmelCase)
@property
def __snake_case ( self : Union[str, Any]):
return len(self.sp_model)
def __snake_case ( self : str):
a : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Optional[int]):
a : Optional[int] = self.__dict__.copy()
a : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , __UpperCAmelCase : Optional[int]):
a : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
a : str = {}
a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __snake_case ( self : int , __UpperCAmelCase : List[str]):
if self.remove_space:
a : Optional[Any] = " ".join(inputs.strip().split())
else:
a : str = inputs
a : Optional[Any] = outputs.replace("``" , "\"").replace("''" , "\"")
if not self.keep_accents:
a : List[Any] = unicodedata.normalize("NFKD" , __UpperCAmelCase)
a : Tuple = "".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase)])
if self.do_lower_case:
a : Union[str, Any] = outputs.lower()
return outputs
def __snake_case ( self : str , __UpperCAmelCase : Any):
a : Optional[Any] = self.preprocess_text(__UpperCAmelCase)
a : Any = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase)
a : List[Any] = []
for piece in pieces:
if len(__UpperCAmelCase) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
a : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
a : Tuple = cur_pieces[1:]
else:
a : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(__UpperCAmelCase)
else:
new_pieces.append(__UpperCAmelCase)
return new_pieces
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
return self.sp_model.PieceToId(__UpperCAmelCase)
def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int]):
return self.sp_model.IdToPiece(__UpperCAmelCase)
def __snake_case ( self : int , __UpperCAmelCase : Optional[int]):
a : Optional[int] = "".join(__UpperCAmelCase).replace(__UpperCAmelCase , " ").strip()
return out_string
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] = False , __UpperCAmelCase : Any = None , __UpperCAmelCase : List[str] = True , **__UpperCAmelCase : Optional[Any] , ):
a : Optional[Any] = kwargs.pop("use_source_tokenizer" , __UpperCAmelCase)
a : Tuple = self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a : str = []
a : Union[str, Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase))
a : Dict = []
sub_texts.append(__UpperCAmelCase)
else:
current_sub_text.append(__UpperCAmelCase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
a : Any = "".join(__UpperCAmelCase)
a : Union[str, Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a : Optional[int] = self.clean_up_tokenization(__UpperCAmelCase)
return clean_text
else:
return text
def __snake_case ( self : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] = None):
a : int = [self.sep_token_id]
a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Any = None , __UpperCAmelCase : Optional[Any] = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase)
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase)) + [1] + ([0] * len(__UpperCAmelCase)) + [1, 1]
return ([0] * len(__UpperCAmelCase)) + [1, 1]
def __snake_case ( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : Any = None):
a : str = [self.sep_token_id]
a : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] = None):
if not os.path.isdir(__UpperCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
a : Tuple = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __UpperCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(__UpperCAmelCase , "wb") as fi:
a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase)
return (out_vocab_file,)
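

# A small runnable sketch (not from the original file) of the XLNet input layout built
# by `build_inputs_with_special_tokens` above: unlike BERT, the special tokens go at
# the end ("A <sep> B <sep> <cls>") and the padding side is "left". The token ids here
# are toy values: 4 stands in for <sep> and 3 for <cls>.
if __name__ == "__main__":
    def xlnet_layout(token_ids_0, token_ids_1=None, sep_id=4, cls_id=3):
        sep, cls = [sep_id], [cls_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    assert xlnet_layout([10, 11]) == [10, 11, 4, 3]
    assert xlnet_layout([10, 11], [12]) == [10, 11, 4, 12, 4, 3]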
"""Tests for the Flax Blenderbot model."""
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor

if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
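

# A self-contained sketch (not from the original file) of what `shift_tokens_right`
# does for these seq2seq tests, mirroring the Flax Bart-style helper: tokens move one
# slot to the right, the first slot gets the decoder start token, and any -100 label
# mask falls back to the pad id.
def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)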
import json
import os
import unittest

from transformers.models.roc_bert.tokenization_roc_bert import (
    VOCAB_FILES_NAMES,
    RoCBertBasicTokenizer,
    RoCBertTokenizer,
    RoCBertWordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
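

# A runnable sketch (not from the original file) of what makes RoCBert different from
# plain BERT tokenization: every token maps to three parallel id sequences (vocab,
# glyph shape, pronunciation). The toy vocabulary mirrors the one written in setUp().
def _rocbert_demo():
    import tempfile

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好"]
    mapping = {value: i for i, value in enumerate(vocab_tokens)}
    with tempfile.TemporaryDirectory() as tmp:
        vocab_file = os.path.join(tmp, VOCAB_FILES_NAMES["vocab_file"])
        shape_file = os.path.join(tmp, VOCAB_FILES_NAMES["word_shape_file"])
        pron_file = os.path.join(tmp, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write("".join(token + "\n" for token in vocab_tokens))
        for path in (shape_file, pron_file):
            with open(path, "w", encoding="utf-8") as f:
                json.dump(mapping, f, ensure_ascii=False)
        tokenizer = RoCBertTokenizer(vocab_file, shape_file, pron_file)
        tokens = tokenizer.tokenize("你好")
        print(tokenizer.convert_tokens_to_ids(tokens))             # [5, 6]
        print(tokenizer.convert_tokens_to_shape_ids(tokens))       # [5, 6]
        print(tokenizer.convert_tokens_to_pronunciation_ids(tokens))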
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
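

# A minimal usage sketch (not from the original file): instantiate the config with the
# X-Mod-specific adapter options; `default_language` selects which per-language adapter
# is active at inference time.
if __name__ == "__main__":
    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    print(config.adapter_reduction_factor, config.languages)  # 2 ['en_XX', 'de_DE']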
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] =logging.get_logger(__name__)
a__ : List[Any] ={
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model"
def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ):
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = initializer_factor
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = project_dim
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model"
def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ):
super().__init__(**__A )
__UpperCamelCase = hidden_size
__UpperCamelCase = intermediate_size
__UpperCamelCase = projection_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = num_channels
__UpperCamelCase = patch_size
__UpperCamelCase = image_size
__UpperCamelCase = initializer_range
__UpperCamelCase = initializer_factor
__UpperCamelCase = attention_dropout
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = hidden_act
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
cls._set_token_in_kwargs(__A )
__UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type' ) == "altclip":
__UpperCamelCase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] ="altclip"
SCREAMING_SNAKE_CASE_ : Optional[int] =True
def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
__UpperCamelCase = kwargs.pop('text_config_dict' , __A )
__UpperCamelCase = kwargs.pop('vision_config_dict' , __A )
super().__init__(**__A )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__UpperCamelCase = {}
# This is the complete result when using `text_config_dict`.
__UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__UpperCamelCase = (
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
__UpperCamelCase = (
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(__A )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
# This is the complete result when using `vision_config_dict`.
_vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_vision_config_dict['id2label'] = {
str(key ): value for key, value in _vision_config_dict['id2label'].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config["{key}"]` will be overridden.'''
)
logger.warning(message )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
text_config = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
if vision_config is None:
vision_config = {}
logger.info('`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.' )
self.text_config = AltCLIPTextConfig(**text_config )
self.vision_config = AltCLIPVisionConfig(**vision_config )
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , text_config : AltCLIPTextConfig , vision_config : AltCLIPVisionConfig , **__A : Optional[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def _lowerCamelCase ( self : List[Any] ):
output = copy.deepcopy(self.__dict__ )
output['text_config'] = self.text_config.to_dict()
output['vision_config'] = self.vision_config.to_dict()
output['model_type'] = self.__class__.model_type
return output
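# A minimal usage sketch of the composite config above, assuming the standard
# `transformers` AltCLIP API is importable; it composes the text and vision
# sub-configs and round-trips them through `to_dict` (illustrative only).
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_cfg = AltCLIPTextConfig()
vision_cfg = AltCLIPVisionConfig(patch_size=32)
composite = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert composite.to_dict()["vision_config"]["patch_size"] == 32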
| 53 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str:
'''simple docstring'''
A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def prepare_image_processor_dict ( self : Any) ->Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def get_expected_values ( self : Optional[Any] , image_inputs : List[str] , batched : int=False) ->Optional[Any]:
'''simple docstring'''
if not batched:
image = image_inputs[0]
if isinstance(image , Image.Image):
w , h = image.size
else:
h , w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size['''shortest_edge'''] * h / w)
expected_width = self.size['''shortest_edge''']
elif w > h:
expected_height = self.size['''shortest_edge''']
expected_width = int(self.size['''shortest_edge'''] * w / h)
else:
expected_height = self.size['''shortest_edge''']
expected_width = self.size['''shortest_edge''']
else:
expected_values = []
for image in image_inputs:
expected_height , expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values , key=lambda item: item[0])[0]
expected_width = max(expected_values , key=lambda item: item[1])[1]
return expected_height, expected_width
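# A standalone sketch of the shortest-edge resize rule the helper above
# implements: the shorter side is scaled to `shortest_edge` and the longer
# side follows proportionally (names and the default value are illustrative).
def shortest_edge_resize(w: int, h: int, shortest_edge: int = 18) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(30, 60) == (36, 18)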
@require_torch
@require_vision
class DeformableDetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = DeformableDetrImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''image_id''': 39_769, '''annotations''': target}
# encode them
A__ = DeformableDetrImageProcessor()
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
A__ = DeformableDetrImageProcessor(format='''coco_panoptic''')
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify masks
A__ = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__)
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
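# For reference, a minimal sketch of the two COCO-style annotation payloads the
# tests above feed to the image processor; field names follow the COCO format,
# the values are made up for illustration.
detection_annotation = {
    "image_id": 39_769,
    "annotations": [
        {"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 75, "area": 1_200.0, "iscrowd": 0},
    ],
}
panoptic_annotation = {
    "file_name": "000000039769.png",
    "image_id": 39_769,
    "segments_info": [{"id": 1, "category_id": 17, "area": 1_200.0, "bbox": [10.0, 20.0, 30.0, 40.0], "iscrowd": 0}],
}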
| 14 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( PipelineTool ):
a : int ="""naver-clova-ix/donut-base-finetuned-docvqa"""
a : str =(
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
a : Optional[int] ="""document_qa"""
a : Union[str, Any] =AutoProcessor
a : List[str] =VisionEncoderDecoderModel
a : str =["""image""", """text"""]
a : Optional[Any] =["""text"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,document,question ):
'''simple docstring'''
task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
prompt = task_prompt.replace("""{user_input}""",question )
decoder_input_ids = self.pre_processor.tokenizer(
prompt,add_special_tokens=False,return_tensors="""pt""" ).input_ids
pixel_values = self.pre_processor(document,return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowerCamelCase__ ( self,inputs ):
'''simple docstring'''
return self.model.generate(
inputs["""pixel_values"""].to(self.device ),decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ),max_length=self.model.decoder.config.max_position_embeddings,early_stopping=True,pad_token_id=self.pre_processor.tokenizer.pad_token_id,eos_token_id=self.pre_processor.tokenizer.eos_token_id,use_cache=True,num_beams=1,bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],return_dict_in_generate=True,).sequences
def lowerCamelCase__ ( self,outputs ):
'''simple docstring'''
sequence = self.pre_processor.batch_decode(outputs )[0]
sequence = sequence.replace(self.pre_processor.tokenizer.eos_token,"""""" )
sequence = sequence.replace(self.pre_processor.tokenizer.pad_token,"""""" )
sequence = re.sub(R"""<.*?>""","""""",sequence,count=1 ).strip() # remove first task start token
sequence = self.pre_processor.token2json(sequence )
return sequence["answer"]
| 46 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
a : Dict =MvpTokenizer
a : int =MvpTokenizerFast
a : Any =True
a : int =filter_roberta_detectors
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().setUp()
__lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE,range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCAmelCase = {"""unk_token""": """<unk>"""}
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file,"""w""",encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file,"""w""",encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def default_tokenizer( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def default_tokenizer_fast( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,max_length=len(__SCREAMING_SNAKE_CASE ),padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9),batch.input_ids.shape )
self.assertEqual((2, 9),batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Test that special tokens are reset
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""",__SCREAMING_SNAKE_CASE )
self.assertIn("""attention_mask""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""labels""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""decoder_attention_mask""",__SCREAMING_SNAKE_CASE )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE,max_length=32,padding="""max_length""",return_tensors="""pt""" )
self.assertEqual(32,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""],padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape,(2, 10_24) )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization."""]
__lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,text_target=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = inputs["""input_ids"""]
__lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """A, <mask> AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ),sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so its sum divided by the sequence length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ),sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ),)
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 46 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=True , A_=False , A_=False , A_=False , A_=2 , A_=99 , A_=0 , A_=32 , A_=5 , A_=4 , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=2 , A_=4 , A_="last" , A_=True , A_=None , A_=0 , ):
'''simple docstring'''
UpperCamelCase : Any = parent
UpperCamelCase : str = batch_size
UpperCamelCase : Tuple = seq_length
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : List[Any] = use_input_lengths
UpperCamelCase : Optional[Any] = use_token_type_ids
UpperCamelCase : Optional[int] = use_labels
UpperCamelCase : Optional[int] = gelu_activation
UpperCamelCase : Optional[Any] = sinusoidal_embeddings
UpperCamelCase : Tuple = causal
UpperCamelCase : List[Any] = asm
UpperCamelCase : List[str] = n_langs
UpperCamelCase : Any = vocab_size
UpperCamelCase : List[str] = n_special
UpperCamelCase : Optional[Any] = hidden_size
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Optional[int] = hidden_dropout_prob
UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Any = num_labels
UpperCamelCase : Dict = num_choices
UpperCamelCase : Union[str, Any] = summary_type
UpperCamelCase : int = use_proj
UpperCamelCase : List[Any] = scope
UpperCamelCase : List[Any] = bos_token_id
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : List[str] = None
if self.use_input_lengths:
UpperCamelCase : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase : Optional[int] = None
if self.use_token_type_ids:
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase : Dict = None
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
if self.use_labels:
UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Optional[int] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __UpperCamelCase( self ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
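# A minimal sketch of building a similar tiny test configuration directly,
# assuming the standard `transformers.XLMConfig` keyword names used above:
from transformers import XLMConfig

tiny_cfg = XLMConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4)
assert tiny_cfg.emb_dim == 32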
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : List[str] = XLMModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = model(A_ , lengths=A_ , langs=A_ )
UpperCamelCase : Optional[int] = model(A_ , langs=A_ )
UpperCamelCase : Any = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[Any] = model(A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : List[str] = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : str = model(A_ )
UpperCamelCase : List[Any] = model(A_ , start_positions=A_ , end_positions=A_ )
UpperCamelCase : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Tuple = model(A_ )
UpperCamelCase : int = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , p_mask=A_ , )
UpperCamelCase : Dict = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , )
((UpperCamelCase) , ) : List[Any] = result_with_labels.to_tuple()
UpperCamelCase : Tuple = model(A_ , start_positions=A_ , end_positions=A_ )
((UpperCamelCase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Any = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Any = model(A_ )
UpperCamelCase : str = model(A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.num_labels
UpperCamelCase : List[str] = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.num_choices
UpperCamelCase : Tuple = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Optional[int] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Optional[Any] = config_and_inputs
UpperCamelCase : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class A__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase :int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCAmelCase :Union[str, Any] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __UpperCamelCase( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __UpperCamelCase( self , inputs_dict , model_class , return_labels=False ):
'''simple docstring'''
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = XLMModelTester(self )
UpperCamelCase : Any = ConfigTester(self , config_class=A_ , emb_dim=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def __UpperCamelCase( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
'''simple docstring'''
self.assertIsInstance(A_ , A_ )
self.assertListEqual(
[isinstance(A_ , A_ ) for iter_attentions in attentions] , [True] * len(A_ ) )
self.assertEqual(len(A_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
UpperCamelCase : str = min_length + idx + 1
UpperCamelCase : int = min_length + idx + 1
UpperCamelCase : str = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A_ ) )
def __UpperCamelCase( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
'''simple docstring'''
self.assertIsInstance(A_ , A_ )
self.assertListEqual(
[isinstance(A_ , A_ ) for iter_hidden_states in hidden_states] , [True] * len(A_ ) , )
self.assertEqual(len(A_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
UpperCamelCase : Any = min_length + idx + 1
UpperCamelCase : Optional[int] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A_ ) , )
pass
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[Any] = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(A_ )
UpperCamelCase : Dict = torch.tensor([[14, 447]] , dtype=torch.long , device=A_ ) # the president
UpperCamelCase : int = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCamelCase : Optional[int] = model.generate(A_ , do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A_ )
| 52 | import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs( word ) -> set:
'''simple docstring'''
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
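# A quick self-contained illustration of the pair extraction above: BPE views
# a word as a sequence of symbols and collects every adjacent pair for ranking.
example_pairs = set()
prev = "h"
for ch in "ello":
    example_pairs.add((prev, ch))
    prev = ch
assert example_pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}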
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class A ( PreTrainedTokenizer ):
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ['input_ids', 'attention_mask']
def __init__(self : Tuple , vocab_file : List[Any] , bos_token : Dict="<s>" , pad_token : Tuple="<pad>" , eos_token : str="</s>" , unk_token : int="<unk>" , do_lower_case : List[str]=False , merges_file : str=None , **kwargs : Optional[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(
unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
self.do_lower_case = do_lower_case
with open(vocab_file , encoding="utf-8" ) as vocab_handle:
self.encoder = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges file provided. {self.__class__.__name__} can only be used for decoding.""" )
self.bpe_ranks = None
self.cache = None
else:
with open(merges_file , encoding="utf-8" ) as merges_handle:
merges = merges_handle.read().split("\n" )[:-1]
merges = [tuple(merge.split()[:2] ) for merge in merges]
self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
self.cache = {}
@property
def lowercase_ (self : List[str] ) -> int:
"""simple docstring"""
return len(self.decoder )
def lowercase_ (self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ (self : Dict , token : Union[str, Any] ) -> str:
"""simple docstring"""
word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = " ".join(word )
if word == "\n " + BPE_TOKEN_MERGES:
word = "\n" + BPE_TOKEN_MERGES
if word.endswith(BPE_TOKEN_MERGES ):
word = word.replace(BPE_TOKEN_MERGES , "" )
word = word.replace(" " , BPE_TOKEN_VOCAB )
self.cache[token] = word
return word
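# A compact standalone sketch of the merge loop above: repeatedly fuse the
# adjacent pair with the best (lowest) rank until no ranked pair remains.
# The ranks below are illustrative, not a real merges table.
def bpe_sketch(word, ranks):
    word = list(word)
    while True:
        candidates = [p for p in zip(word, word[1:]) if p in ranks]
        if not candidates:
            return word
        first, second = min(candidates, key=ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged

assert bpe_sketch("lower", {("l", "o"): 0, ("lo", "w"): 1}) == ["low", "e", "r"]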
def lowercase_ (self : Tuple , text : str ) -> List[str]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding. "
"Make sure to provide a `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
text = text.lower()
text = text.split()
split_tokens = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
return split_tokens
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str ) -> int:
"""simple docstring"""
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase_ (self : Any , __UpperCAmelCase : int ) -> str:
"""simple docstring"""
result = self.decoder.get(__UpperCAmelCase , self.unk_token )
return result
def lowercase_ (self : Dict , tokens : List[str] ) -> str:
"""simple docstring"""
string = " ".join(tokens )
# make sure @@ tokens are concatenated
string = "".join(string.split(BPE_TOKEN_VOCAB ) )
return string
def lowercase_ (self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
merges_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(vocab_file , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
index = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(merges_file , "w" , encoding="utf-8" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
index = token_index
writer.write(" ".join(bpe_tokens ) + "\n" )
index += 1
return (vocab_file, merges_file)
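# For reference, a standalone sketch of the merges file layout re-serialized
# above: one "first second" pair per line, ordered by merge rank.
example_ranks = {("\u0120", "l"): 0, ("\u0120l", "o"): 1}
merge_lines = [" ".join(pair) for pair, _ in sorted(example_ranks.items(), key=lambda kv: kv[1])]
assert merge_lines == ["\u0120 l", "\u0120l o"]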
| 65 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class UpperCamelCase__ ( TokenizerTesterMixin ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaPhonemeCTCTokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = False
def lowerCAmelCase (self : Tuple ):
super().setUp()
__a : Union[str, Any] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
__a : Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__a : int = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
__a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(snake_case_ ) + '''\n''' )
def lowerCAmelCase (self : int , tokenizer : Tuple , with_prefix_space : Tuple=False , max_length : str=2_0 , min_length : Tuple=5 ):
__a : List[str] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case_ )) for i in range(len(snake_case_ ) )]
__a : int = list(filter(lambda snake_case_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=snake_case_ ) , snake_case_ ) )
if max_length is not None and len(snake_case_ ) > max_length:
__a : int = toks[:max_length]
if min_length is not None and len(snake_case_ ) < min_length and len(snake_case_ ) > 0:
while len(snake_case_ ) < min_length:
__a : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
__a : Tuple = [t[0] for t in toks]
# Ensure consistency
__a : Dict = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
if " " not in output_txt and len(snake_case_ ) > 1:
__a : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case_ )
)
if with_prefix_space:
__a : Tuple = ''' ''' + output_txt
__a : Union[str, Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
return output_txt, output_ids
def lowerCAmelCase (self : List[str] , **snake_case_ : List[Any] ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
__a : Optional[Any] = tokenizer('''m xxx ɪ''' , do_phonemize=snake_case_ ).input_ids
self.assertEqual(snake_case_ , [1_3, 3_9_2, 1_7] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
__a : Union[str, Any] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=snake_case_ ).input_ids
self.assertEqual(snake_case_ , [1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa
__a : Any = tokenizer('''maɪ c''' , do_phonemize=snake_case_ ).input_ids
self.assertEqual(snake_case_ , [3, 2_0_0] ) # mai should be <unk> (=3)
def lowerCAmelCase (self : Optional[int] ):
__a : int = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__a : Optional[Any] = '''Hello how are you'''
__a : List[Any] = tokenizer.phonemize(snake_case_ , phonemizer_lang='''en-us''' )
self.assertEqual(snake_case_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def lowerCAmelCase (self : Optional[int] ):
__a : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__a : Tuple = '''Hello how are you'''
__a : Any = tokenizer.phonemize(snake_case_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(snake_case_ ).input_ids , tokenizer(snake_case_ , do_phonemize=snake_case_ ).input_ids )
def lowerCAmelCase (self : Optional[Any] ):
__a : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__a : Any = '''Hello how are you'''
__a : int = tokenizer.phonemize(snake_case_ , phonemizer_lang='''en-us''' )
__a : Dict = tokenizer.decode(tokenizer(snake_case_ ).input_ids )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__a : Tuple = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
__a : List[Any] = tokenizer.decode(sample_ids[0] )
__a : Optional[Any] = tokenizer.batch_decode(snake_case_ )
self.assertEqual(snake_case_ , batch_tokens[0] )
self.assertEqual(snake_case_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def lowerCAmelCase (self : Optional[Any] ):
__a : int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__a : Any = '''Hello how are you'''
__a : List[Any] = tokenizer.phonemize(snake_case_ , phonemizer_lang='''en-us''' )
self.assertEqual(snake_case_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def lowerCAmelCase (self : Union[str, Any] ):
__a : Any = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__a : Optional[Any] = '''Hello how are you'''
__a : Optional[Any] = tokenizer.phonemize(snake_case_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(snake_case_ ).input_ids , tokenizer(snake_case_ , do_phonemize=snake_case_ ).input_ids )
def lowerCAmelCase (self : Any ):
__a : int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
__a : int = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
__a : Optional[int] = tokenizer.decode(sample_ids[0] )
__a : List[Any] = tokenizer.batch_decode(snake_case_ )
self.assertEqual(snake_case_ , batch_tokens[0] )
self.assertEqual(snake_case_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
__a : Optional[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=snake_case_ )
__a : List[str] = tokenizer.batch_decode(snake_case_ , filter_word_delimiter_token=snake_case_ )
self.assertEqual(snake_case_ , batch_tokens[0] )
self.assertEqual(snake_case_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def lowerCAmelCase (self : Optional[Any] ):
__a : List[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__a : Union[str, Any] = '''Hello how are you'''
__a : List[str] = tokenizer.phonemize(snake_case_ , phonemizer_lang='''en-us''' )
__a : str = tokenizer.decode(tokenizer(snake_case_ ).input_ids , filter_word_delimiter_token=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase (self : int ):
__a : Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__a : Optional[int] = '''Hello how are you'''
__a : Optional[Any] = tokenizer.phonemize(snake_case_ , phonemizer_lang='''en-us''' )
__a : Optional[int] = tokenizer.decode(tokenizer(snake_case_ ).input_ids , filter_word_delimiter_token=snake_case_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , snake_case_ )
def lowerCAmelCase (self : Union[str, Any] ):
__a : Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=snake_case_ )
__a : Any = '''Hello how are you'''
__a : Any = tokenizer(snake_case_ , phonemizer_lang='''en-us''' ).input_ids
__a : str = tokenizer(snake_case_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(snake_case_ , snake_case_ )
__a : Union[str, Any] = tokenizer.decode(snake_case_ )
__a : List[Any] = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(snake_case_ , '''ɛ l o h aʊ a ʁ j u''' )
def lowerCAmelCase (self : List[str] ):
__a : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__a : str = '''Hello how Are you'''
__a : str = '''hello how are you'''
__a : Any = tokenizer(snake_case_ ).input_ids
__a : Optional[Any] = tokenizer(snake_case_ ).input_ids
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase (self : int ):
__a : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
__a : int = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
__a : Dict = tokenizer.batch_decode(snake_case_ )
self.assertEqual(snake_case_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def get_from_offsets (offsets : Union[str, Any] , key : Optional[int] ):
retrieved_list = [d[key] for d in offsets]
return retrieved_list
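# A quick illustration of the offset-plucking helper above (the offset
# dictionaries here are made up):
example_offsets = [
    {"char": "k", "start_offset": 0, "end_offset": 1},
    {"char": "s", "start_offset": 1, "end_offset": 4},
]
assert [d["char"] for d in example_offsets] == ["k", "s"]
assert [d["start_offset"] for d in example_offsets] == [0, 1]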
def lowerCAmelCase (self : int ):
__a : Union[str, Any] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__a : Union[str, Any] = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
__a : Dict = tokenizer.decode(snake_case_ , output_char_offsets=snake_case_ , filter_word_delimiter_token=snake_case_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(snake_case_ , snake_case_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7] )
    def test_offsets_batch(self ):
        tokenizer = self.get_tokenizer(word_delimiter_token='''|''' )
        def check_list_tuples_equal(outputs_batch : Optional[int] , outputs_list : Any ):
            self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
            self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
            # transform list to ModelOutput
            outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
            def recursive_check(list_a : str , list_b : List[Any] ):
                if isinstance(list_a , list ):
                    [recursive_check(la , lb ) for la, lb in zip(list_a , list_b )]
                self.assertEqual(list_a , list_b )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
        sample_ids = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs_char_batch = tokenizer.batch_decode(sample_ids , output_char_offsets=True )
        outputs_char = [tokenizer.decode(ids , output_char_offsets=True ) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch , outputs_char )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def lowerCAmelCase (self : Dict ):
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def lowerCAmelCase (self : Optional[int] ):
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def lowerCAmelCase (self : List[Any] ):
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def lowerCAmelCase (self : Any ):
pass
    def test_add_tokens_tokenizer(self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )
                self.assertNotEqual(vocab_size_2 , 0 )
                self.assertEqual(vocab_size , vocab_size_2 )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_2 , all_size + len(new_toks ) )
                tokens = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                new_toks_2 = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )
                self.assertNotEqual(vocab_size_3 , 0 )
                self.assertEqual(vocab_size , vocab_size_3 )
                self.assertEqual(added_toks_2 , len(new_toks_2 ) )
                self.assertEqual(all_size_3 , all_size_2 + len(new_toks_2 ) )
                tokens = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def lowerCAmelCase (self : Dict ):
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def lowerCAmelCase (self : List[str] ):
pass
    def test_convert_tokens_to_string_format(self ):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                tokens = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
                output = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(output['''text'''] , str )
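# Illustrative sketch (not part of the test class above; the 0.02 s frame duration is
# an assumption for a typical Wav2Vec2 checkpoint, not a value taken from these tests):
# char offsets returned by `decode(..., output_char_offsets=True)` are counted in model
# frames, so multiplying by the frame duration converts them to seconds.
def char_offsets_to_seconds(char_offsets, time_offset=0.02):
    return [
        {
            "char": o["char"],
            "start_time": round(o["start_offset"] * time_offset, 3),
            "end_time": round(o["end_offset"] * time_offset, 3),
        }
        for o in char_offsets
    ]
assert char_offsets_to_seconds([{"char": "k", "start_offset": 0, "end_offset": 1}]) == [
    {"char": "k", "start_time": 0.0, "end_time": 0.02}
]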
| 367 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 90 | 0 |
import torch
from torch import nn
class _lowercase (nn.Module ):
'''simple docstring'''
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        '''simple docstring'''
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        '''simple docstring'''
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        '''simple docstring'''
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError("Input and labels should have the same size in the batch dimension." )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ):
        '''simple docstring'''
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
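# Illustrative sketch (assumed sizes typical of Transformer-XL on WikiText-103, not
# values from the module above): the adaptive softmax replaces one dense
# (d_embed x n_token) projection with a small head over the shortlist plus per-cluster
# tails of shrinking width d_embed // div_val**i, which is where the savings come from.
def adaptive_softmax_param_count(n_token=267_735, d_embed=1_024, cutoffs=(20_000, 40_000, 200_000), div_val=4):
    cutoffs = list(cutoffs) + [n_token]
    params = d_embed * (cutoffs[0] + len(cutoffs) - 1)  # head: shortlist plus one logit per cluster
    prev = cutoffs[0]
    for i, cut in enumerate(cutoffs[1:], start=1):
        d_emb_i = d_embed // (div_val**i)  # narrower embedding for rarer clusters
        params += d_emb_i * (cut - prev)   # tail output layer for this cluster
        params += d_embed * d_emb_i        # projection from d_embed down to the tail width
        prev = cut
    return params
# a dense softmax would need n_token * d_embed (~274M) weights; the adaptive one far fewer
assert adaptive_softmax_param_count() < 267_735 * 1_024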
| 128 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height , width , scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image , w=5_12 , h=5_12):
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
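# Illustrative check (assumed values): the helper above returns latent-space sizes
# rounded up so that multiplying back by the movq scale factor covers the request.
assert downscale_height_and_width(768, 768) == (96, 96)  # 768 = 12 * 64, no rounding
assert downscale_height_and_width(500, 768) == (64, 96)  # 500 rounds up to 8 latent blocks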
class _lowercase (DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet , scheduler , movq , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        '''simple docstring'''
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                    F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        device = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        '''simple docstring'''
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , image , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , strength = 0.3 , num_images_per_prompt = 1 , generator = None , output_type = "pil" , return_dict = True , ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        if not isinstance(image , list ):
            image = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
        image = torch.cat([prepare_image(i , width , height ) for i in image] , dim=0 )
        image = image.to(dtype=image_embeds.dtype , device=device )
        latents = self.movq.encode(image )["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt , dim=0 )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , device )
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        latents = self.prepare_latents(
            latents , latent_timestep , batch_size , num_images_per_prompt , image_embeds.dtype , device , generator )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
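# Illustrative sketch (standalone; mirrors get_timesteps above with assumed values):
# `strength` controls how much of the denoising trajectory is re-run on top of the
# encoded init image, so low strength keeps the output close to the input.
def effective_steps(num_inference_steps, strength):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start
assert effective_steps(100, 0.2) == 20    # only the last 20 timesteps are applied
assert effective_steps(100, 1.0) == 100   # full denoising, init image has little influence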
| 128 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCamelCase: Any = logging.get_logger(__name__)
class a__ ( BaseImageProcessor ):
_lowerCamelCase = ['pixel_values']
    def __init__( self, do_resize : bool = True, size : Dict[str, int] = None, crop_pct : float = None, resample : PILImageResampling = PILImageResampling.BILINEAR, do_rescale : bool = True, rescale_factor : Union[int, float] = 1 / 255, do_normalize : bool = True, image_mean : Optional[Union[float, List[float]]] = None, image_std : Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size, default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image : np.ndarray, size : Dict[str, int], crop_pct : float, resample : PILImageResampling = PILImageResampling.BICUBIC, data_format : Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False )
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs )
    def rescale( self, image : np.ndarray, scale : Union[int, float], data_format : Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs )
    def normalize( self, image : np.ndarray, mean : Union[float, List[float]], std : Union[float, List[float]], data_format : Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs )
    def preprocess( self, images : ImageInput, do_resize : bool = None, size : Dict[str, int] = None, crop_pct : float = None, resample : PILImageResampling = None, do_rescale : bool = None, rescale_factor : float = None, do_normalize : bool = None, image_mean : Optional[Union[float, List[float]]] = None, image_std : Optional[Union[float, List[float]]] = None, return_tensors : Optional[Union[str, TensorType]] = None, data_format : ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors )
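# Illustrative numbers (assumed defaults, separate from the class above): with the
# default crop_pct of 224/256 and {"shortest_edge": 224}, the resize step first scales
# the shortest edge to int(224 / (224/256)) = 256 and then center-crops 224, matching
# the common "resize 256, crop 224" evaluation recipe; at 384 and above the image is
# warped straight to a square with no crop.
def resize_then_crop_edge(shortest_edge, crop_pct=224 / 256):
    return int(shortest_edge / crop_pct) if shortest_edge < 384 else shortest_edge
assert resize_then_crop_edge(224) == 256
assert resize_then_crop_edge(384) == 384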
| 53 |
"""simple docstring"""
import os
from pathlib import Path
def lowercase__ ( ):
    '''simple docstring'''
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
        ]
    ]
    load(
        'MultiScaleDeformableAttention' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
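# Illustrative guard (an assumption about how callers might use the loader, not code
# from the original file): JIT compilation fails without an nvcc toolchain, so wrapping
# the loader and falling back to a pure-PyTorch path is a common pattern.
def try_load_kernels():
    try:
        return lowercase__()  # first call compiles; torch caches later builds
    except Exception:
        return None  # caller should fall back to the pure-PyTorch attention implementation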
| 53 | 1 |
def encrypt(input_string : str , key : int ) -> str:
    temp_grid : list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = [''.join(row ) for row in temp_grid]
    output_string = ''.join(grid )
    return output_string
def decrypt(input_string : str , key : int ) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1:
        return input_string
    temp_grid : list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('*' )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ''  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce(input_string : str ) -> dict:
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
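# Worked example (illustrative, using the functions above): with key=3 the plaintext
# zigzags over three rails, encrypt() reads the rails row by row, and decrypt()
# inverts the traversal exactly, so the round trip recovers the message.
ciphertext = encrypt("WEAREDISCOVERED", 3)
assert decrypt(ciphertext, 3) == "WEAREDISCOVERED"
assert bruteforce(ciphertext)[3] == "WEAREDISCOVERED"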
| 277 |
def mf_knapsack(i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack(w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w : int , wt : list , val : list ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set : set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp : list , wt : list , i : int , j : int , optimal_set : set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 277 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 359 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def resolver( self ):
        """simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
    def test_model_card( self ):
        """simple docstring"""
        content , mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 83 | 0 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo : str, pytorch_dump_folder_path : str ):
    """simple docstring"""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['''RobertaPreLayerNormForMaskedLM'''] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='''pytorch_model.bin''' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('''roberta.''' ):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
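# Illustrative check of the key remapping above (toy key names, not a real checkpoint):
# 'roberta.' prefixes are rewritten and unused self-attention LayerNorm weights dropped.
def _remap(key):
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        return None
    return key
assert _remap("roberta.encoder.layer.0.output.dense.weight") == "roberta_prelayernorm.encoder.layer.0.output.dense.weight"
assert _remap("roberta.encoder.layer.0.attention.self.LayerNorm.bias") is None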
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 320 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph :
'''simple docstring'''
    def __init__( self ):
        self.graph = {}
    def add_pair( self , u , v , w=1 ):
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes( self ):
        return list(self.graph )
    def remove_pair( self , u , v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs( self , s=-2 , d=-1 ):
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
    def fill_graph_randomly( self , c=-1 ):
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
    def bfs( self , s=-2 ):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree( self , u ):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree( self , u ):
        return len(self.graph[u] )
    def topological_sort( self , s=-2 ):
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
    def cycle_nodes( self ):
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
    def has_cycle( self ):
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
    def dfs_time( self , s=-2 , e=-1 ):
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time( self , s=-2 ):
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
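# Illustrative usage (a small assumed example, not from the original module): add_pair
# stores weighted directed edges and bfs returns the breadth-first visit order; the
# cycle helpers rely on a back edge to a vertex still on the DFS stack to mark the
# enclosed vertices as cyclic.
def _directed_graph_demo():
    g = DirectedGraph()
    g.add_pair(0 , 1 )
    g.add_pair(1 , 2 )
    g.add_pair(2 , 0 )  # closes the cycle 0 -> 1 -> 2 -> 0
    assert g.bfs(0 ) == [0, 1, 2]
    return g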
class Graph :
'''simple docstring'''
    def __init__( self ):
        self.graph = {}
    def add_pair( self , u , v , w=1 ):
        # check if the u exists
        if self.graph.get(u ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]
    def remove_pair( self , u , v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def dfs( self , s=-2 , d=-1 ):
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
    def fill_graph_randomly( self , c=-1 ):
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
    def bfs( self , s=-2 ):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def degree( self , u ):
        return len(self.graph[u] )
    def cycle_nodes( self ):
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
    def has_cycle( self ):
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
    def all_nodes( self ):
        return list(self.graph )
    def dfs_time( self , s=-2 , e=-1 ):
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time( self , s=-2 ):
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
| 320 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __magic_name__ ( ImageProcessingSavingTestMixin, unittest.TestCase):
UpperCamelCase__ = GLPNImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = GLPNImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
lowercase_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size_divisor""" ) )
self.assertTrue(hasattr(lowercase_ , """resample""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
    def test_init_without_params( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
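# Illustrative helper (an assumption about what the assertions above verify, not part
# of the test class): GLPN floors both spatial dimensions to a multiple of
# size_divisor, which is exactly what the `% size_divisor == 0` checks confirm.
def round_down_to_divisor(height, width, size_divisor=32):
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor
assert round_down_to_divisor(400, 300) == (384, 288)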
| 21 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __magic_name__ ( BertTokenizationTest):
UpperCamelCase__ = DistilBertTokenizer
UpperCamelCase__ = DistilBertTokenizerFast
UpperCamelCase__ = True
@slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
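# Illustrative sketch (toy ids, independent of the slow test above): the special-token
# layout asserted there is [CLS] A [SEP] for one sequence and [CLS] A [SEP] B [SEP]
# for a pair.
def build_inputs(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
assert build_inputs(101, 102, [7, 8]) == [101, 7, 8, 102]
assert build_inputs(101, 102, [7, 8], [9]) == [101, 7, 8, 102, 9, 102]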
| 21 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
    """simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=64 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = embedding_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = MobileBertModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = MobileBertForMaskedLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MobileBertForNextSentencePrediction(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = MobileBertForPreTraining(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , next_sentence_label=lowerCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = MobileBertForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MobileBertForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MobileBertForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = MobileBertForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config ,
            input_ids ,
            token_type_ids ,
            input_mask ,
            sequence_labels ,
            token_labels ,
            choice_labels ,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
snake_case_ = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Dict:
'''simple docstring'''
__lowerCamelCase = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase__ )
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
return inputs_dict
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = MobileBertModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowercase_ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase__ )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase__ )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase__ )
def _long_tensor( UpperCamelCase__ : Tuple ) -> List[Any]:
    """simple docstring"""
    return torch.tensor(
        UpperCamelCase__ , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
                    [-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
                    [2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
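# The bounded-ratio check above is a general pattern for comparing tensors with a
# large dynamic range. A minimal standalone sketch, assuming two same-shape
# tensors `expected` and `actual`:
#
#   import torch
#   def within_relative_tolerance(expected, actual, tol=1e-3):
#       ratio = expected / actual
#       return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))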
| 90 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys( s_dict ):
    """simple docstring"""
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_regex = R".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex , key ):
            new_key = re.sub(R"layers_(\d+)" , R"block/\1/layer" , new_key )
        encoder_decoder_regex = R"(encoder|decoder)\/"
        if re.match(encoder_decoder_regex , key ):
            groups = re.match(encoder_decoder_regex , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"/mlp/" , R"/1/mlp/" , new_key )
                new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R"/mlp/" , R"/2/mlp/" , new_key )
                new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                # split the stacked expert weights into one entry per expert
                new_expert_key = key.replace("expert/" , f"experts/expert_{idx}/" )
                s_dict[new_expert_key] = expert_weights[idx]
                print(f'{key} -> {new_expert_key}' )
            s_dict.pop(key )
    return s_dict
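# Example of the renaming above on an illustrative (made-up) checkpoint key:
#   "encoder/layers_0/attention/query/kernel"
#     -> "encoder/block/0/layer/attention/query/kernel"    (layers_\d+ -> block/\d/layer)
#     -> "encoder/block/0/layer/0/SelfAttention/q/kernel"  (MOE_LAYER_NAME_MAPPING)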
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config( gin_file , num_experts ):
    """simple docstring"""
    import regex as re
    with open(gin_file , "r" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R"(.*) = ([0-9.]*)" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value )
    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_name , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    """simple docstring"""
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params , sep="/" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="/" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
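# Example invocation (script name assumed from this module; all paths are
# placeholders):
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8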
| 18 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args ):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("""/""" )
    target_model_path = args.target_model_path
    print(f'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path , """pytorch_model.bin""" ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                mask_scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(mask_scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                mask_scores = model[f'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(mask_scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                mask_scores = model[f'''{prefix_}mask_scores''']
                l , r = -0.1, 1.1
                s = torch.sigmoid(mask_scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            else:
                raise ValueError("""Unknown pruning method""" )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'''bertarized_{os.path.basename(model_name_or_path )}''' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'''\nCreated folder {target_model_path}''' )
    torch.save(pruned_model , os.path.join(target_model_path , """pytorch_model.bin""" ) )
    print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
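# Magnitude pruning in isolation, as a minimal sketch of the idea behind
# `MagnitudeBinarizer` above (assumes a weight tensor `w` and a remaining-weights
# ratio `threshold` in (0, 1]):
#
#   import torch
#   def magnitude_mask(w: torch.Tensor, threshold: float) -> torch.Tensor:
#       k = max(1, int(threshold * w.numel()))  # number of weights to keep
#       cutoff = torch.topk(w.abs().flatten(), k).values.min()
#       return (w.abs() >= cutoff).to(w.dtype)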
| 359 |
from __future__ import annotations
def median_of_two_arrays(numsa: list[float] , numsb: list[float] ) -> float:
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_a = [float(x) for x in input("""Enter the elements of first array: """).split()]
    array_b = [float(x) for x in input("""Enter the elements of second array: """).split()]
    print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
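# Worked examples:
#   median_of_two_arrays([1, 3], [2])     # sorted [1, 2, 3], odd length  -> 2
#   median_of_two_arrays([1, 2], [3, 4])  # sorted [1, 2, 3, 4], even     -> (2 + 3) / 2 = 2.5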
| 139 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video( url: str ) -> bytes:
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
    print(F'''Done. Video saved to disk as {file_name}.''')
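# Optional variant (a sketch, not in the original): stream large videos to disk
# instead of buffering the whole response in memory, assuming the resolved
# `video_url` from download_video above:
#
#   with requests.get(video_url, stream=True) as r, open(file_name, 'wb') as fp:
#       for chunk in r.iter_content(chunk_size=1 << 20):
#           fp.write(chunk)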
| 16 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
'''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline( Pipeline ):
'''simple docstring'''
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
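# Usage sketch for the pipeline above (the model name is just an example):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)
#   # -> [{'generated_text': "Hello, I'm a language model, ..."}]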
| 272 | 0 |
from pathlib import Path
import fire
def minify( src_dir: str , dest_dir: str , n: int ):
    '''Write the first `n` lines of each file in `src_dir` to `dest_dir`.'''
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
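# Example invocation via fire (script name and paths are placeholders; keeps the
# first 100 lines of every file):
#   python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_100 100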
| 250 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : List[Any] = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
snake_case__ : List[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
snake_case__ : Optional[Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
snake_case__ : Optional[Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
snake_case__ : Optional[int] = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
snake_case__ : List[Any] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
snake_case__ : List[str] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
snake_case__ : Dict = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
snake_case__ : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
snake_case__ : Optional[int] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
snake_case__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
snake_case__ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
snake_case__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
snake_case__ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
snake_case__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_MAPPING
snake_case__ : List[Any] = auto_class_update(FlaxAutoModel)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case__ : List[Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case__ : int = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
snake_case__ : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
snake_case__ : Dict = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
snake_case__ : Union[str, Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
snake_case__ : List[str] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
snake_case__ : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
snake_case__ : Optional[Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
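# Usage sketch: each auto class above resolves the concrete architecture from the
# checkpoint's config type, e.g.
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel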
| 250 | 1 |
"""simple docstring"""
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages | 81 | import math
def perfect_square( num ) -> bool:
    '''Check if a number is a perfect square using math.sqrt.'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n ) -> bool:
    '''Check if a number is a perfect square using binary search.'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
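# Walk-through of perfect_square_binary_search(16):
#   left=0, right=16 -> mid=8, 64 > 16  -> right=7
#   left=0, right=7  -> mid=3, 9 < 16   -> left=4
#   left=4, right=7  -> mid=5, 25 > 16  -> right=4
#   left=4, right=4  -> mid=4, 16 == 16 -> True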
| 65 | 0 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt , class_data_dir , num_class_images ):
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fb, open(
        f"""{class_data_dir}/images.txt""" , "w" ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )  # validate that the payload is an image
                    with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fb.write(images["url"] + "\n" )
                    fc.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
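# Example invocation (script name assumed; requires the knn.laion.ai service to be
# reachable):
#   python retrieve.py --class_prompt "photo of a cat" --class_data_dir ./real_reg/cat --num_class_images 200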
| 241 |
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime( number: int ) -> int:
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
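# Examples: twin_prime(5) -> 7 (5 and 7 are both prime); twin_prime(4) -> -1;
#           twin_prime(0.1) raises TypeError.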
| 241 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters( state_dict ):
    '''simple docstring'''
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict( state_dict , codebook_state_dict ):
    '''simple docstring'''
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
        key = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
        key = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
        key = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
        key = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
        key = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
        key = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
        key = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
        key = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
        key = key.replace('''image_encoder.module''' , '''flava.image_model''' )
        key = key.replace('''text_encoder.module''' , '''flava.text_model''' )
        key = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
        key = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
        key = key.replace('''text_projection''' , '''flava.text_projection''' )
        key = key.replace('''image_projection''' , '''flava.image_projection''' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[key] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint( checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location='''cpu''' )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
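# Example invocation (script name and paths are placeholders):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava_checkpoint.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-full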
| 3 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf( model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
__SCREAMING_SNAKE_CASE = config_class.from_json_file(a__ )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
print(F'Building TensorFlow model from configuration: {config}' )
__SCREAMING_SNAKE_CASE = model_class(a__ )
    # Load weights from the PyTorch checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__SCREAMING_SNAKE_CASE = cached_file(
a__ , a__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__SCREAMING_SNAKE_CASE = load_pytorch_checkpoint_in_tfa_model(a__ , a__ )
if compare_with_pt_model:
__SCREAMING_SNAKE_CASE = tf_model(tf_model.dummy_inputs , training=a__ ) # build the network
__SCREAMING_SNAKE_CASE = torch.load(a__ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = pt_model_class.from_pretrained(
pretrained_model_name_or_path=a__ , config=a__ , state_dict=a__ )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = pt_model(**pt_model.dummy_inputs )
__SCREAMING_SNAKE_CASE = pto[0].numpy()
__SCREAMING_SNAKE_CASE = tfo[0].numpy()
__SCREAMING_SNAKE_CASE = np.amax(np.abs(np_pt - np_tf ) )
print(F'Max absolute difference between models outputs {diff}' )
assert diff <= 2E-2, F'Error, model absolute difference is >2e-2: {diff}'
    # Save the TensorFlow model
print(F'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(a__ , save_format="""h5""" )
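# --- Illustrative sketch (not part of the original script) ---
# The compare_with_pt_model branch above is a plain numerical equivalence
# check: run both frameworks on identical dummy inputs and compare the max
# absolute difference of the first output tensor. A minimal standalone
# version of that check over two numpy arrays might look like this:
def _sketch_max_abs_diff(np_pt, np_tf, tolerance=2e-2):
    diff = np.amax(np.abs(np_pt - np_tf))
    if diff > tolerance:
        raise ValueError(f"Model outputs differ by {diff} (> {tolerance})")
    return diff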
def convert_all_pt_checkpoints_to_tf( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
"""simple docstring"""
if args_model_type is None:
__SCREAMING_SNAKE_CASE = list(MODEL_CLASSES.keys() )
else:
__SCREAMING_SNAKE_CASE = [args_model_type]
for j, model_type in enumerate(a__ , start=1 ):
print("""=""" * 1_00 )
print(F' Converting model type {j}/{len(a__ )}: {model_type}' )
print("""=""" * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(a__ , a__ ) , start=1 ):
print("""-""" * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
__SCREAMING_SNAKE_CASE = model_shortcut_name
elif only_convert_finetuned_models:
            print(F' Skipping non-finetuned checkpoint {model_shortcut_name}' )
continue
print(
F' Converting checkpoint {i}/{len(a__ )}: {model_shortcut_name} - model_type {model_type}' )
print("""-""" * 1_00 )
if config_shortcut_name in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = model_shortcut_name
if os.path.isfile(a__ ):
__SCREAMING_SNAKE_CASE = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=a__ , pytorch_checkpoint_path=a__ , config_file=a__ , tf_dump_path=os.path.join(a__ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=a__ , )
if remove_cached_files:
os.remove(a__ )
os.remove(a__ )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
        'This specifies the model architecture. If not given, and '
        '--pytorch_checkpoint_path is not given or is a shortcut name, '
        'the configuration associated with the shortcut name on AWS is used.'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 267 | 0 |
'''simple docstring'''
from __future__ import annotations
_lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    '''simple docstring'''
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breadth_first_search( self ) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex: str ) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
if __name__ == "__main__":
    g = Graph(_lowerCAmelCase , '''G''')
    g.breadth_first_search()
    print(g.shortest_path('''D'''))
    print(g.shortest_path('''G'''))
    print(g.shortest_path('''Foo'''))
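# Illustrative addition (not in the original): the same shortest path can be
# reconstructed iteratively from the parent map instead of via recursion.
def iterative_shortest_path(g: Graph, target_vertex: str) -> str:
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(f"No path from vertex: {g.source_vertex} to vertex: {target_vertex}")
        path.append(parent)
    return "->".join(reversed(path))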
| 184 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    '''simple docstring'''
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
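# Illustrative note (not in the original): because `copy` deep-copies every
# field, mutating a mutable field on the clone does not leak into the source:
#   original = DownloadConfig(storage_options={"anon": True})
#   clone = original.copy()
#   clone.storage_options["anon"] = False  # original.storage_options is unchanged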
| 184 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(UpperCAmelCase__ )
self.assertTrue(isinstance(dc.token_ids , UpperCAmelCase__ ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase_ ( self : Any ) -> int:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(UpperCAmelCase__ ) # fails here
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase_ ( self : str ) -> List[str]:
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
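# Illustrative usage sketch (not part of the test suite): the stepwise update
# protocol exercised above, fulfilling one branch of the disjunction.
def _sketch_disjunctive_usage():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)  # flags describe each step
    assert completed and dc.current_seq == [1, 2, 4]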
| 54 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx: int ) -> list:
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention( idx: int , cnt: int ) -> list:
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token( idx: int ) -> list:
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final() -> list:
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder ):
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
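# Illustrative helper (not in the original): the conversion above boils down to
# building (hf_name, original_name) pairs and copying tensors across, roughly:
def _sketch_remap_state_dict(pairs, original_weights):
    # copy tensors from the source checkpoint under their HF names
    return OrderedDict((hf_name, orig_name_weights)
                       for hf_name, orig_name in pairs
                       for orig_name_weights in (original_weights[orig_name],))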
| 0 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _UpperCamelCase :
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int]=99 , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Dict=7 , _SCREAMING_SNAKE_CASE: List[Any]=9 , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: List[Any]=32 , _SCREAMING_SNAKE_CASE: Union[str, Any]=5 , _SCREAMING_SNAKE_CASE: str=4 , _SCREAMING_SNAKE_CASE: Optional[Any]=37 , _SCREAMING_SNAKE_CASE: Union[str, Any]=8 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Any=0.0_02 , _SCREAMING_SNAKE_CASE: List[Any]=1 , _SCREAMING_SNAKE_CASE: Dict=0 , _SCREAMING_SNAKE_CASE: int=0 , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = encoder_seq_length
UpperCamelCase_ = decoder_seq_length
# For common tests
UpperCamelCase_ = self.decoder_seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_attention_mask
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = d_ff
UpperCamelCase_ = relative_attention_num_buckets
UpperCamelCase_ = dropout_rate
UpperCamelCase_ = initializer_factor
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = decoder_start_token_id
UpperCamelCase_ = None
UpperCamelCase_ = decoder_layers
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
return TaConfig.from_pretrained("google/umt5-base" )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Tuple=None , _SCREAMING_SNAKE_CASE: List[str]=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
UpperCamelCase_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_SCREAMING_SNAKE_CASE )
if decoder_head_mask is None:
UpperCamelCase_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_SCREAMING_SNAKE_CASE )
if cross_attn_head_mask is None:
UpperCamelCase_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_SCREAMING_SNAKE_CASE )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase_ = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ = self.get_config()
UpperCamelCase_ = config.num_attention_heads
UpperCamelCase_ = self.prepare_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, input_dict
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase ( self: Tuple ) -> Any:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = UMTaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(
input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = result.last_hidden_state
UpperCamelCase_ = result.past_key_values
UpperCamelCase_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_SCREAMING_SNAKE_CASE ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = UMTaModel(config=_SCREAMING_SNAKE_CASE ).get_decoder().to(_SCREAMING_SNAKE_CASE ).eval()
# first forward pass
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) + 1 )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE )["last_hidden_state"]
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )["last_hidden_state"]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = UMTaModel(config=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ).half().eval()
UpperCamelCase_ = model(**_SCREAMING_SNAKE_CASE )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_SCREAMING_SNAKE_CASE ).any().item() )
@require_torch
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : int = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_UpperCamelCase : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_UpperCamelCase : List[str] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Dict = True
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Tuple = True
_UpperCamelCase : Dict = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_UpperCamelCase : Dict = [0.8, 0.9]
def lowercase ( self: Any ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def lowercase ( self: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = UMTaModel(config_and_inputs[0] ).to(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_SCREAMING_SNAKE_CASE , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_SCREAMING_SNAKE_CASE , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
UpperCamelCase_ = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = config_and_inputs[0]
UpperCamelCase_ = UMTaForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval()
model.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ),
}
for attn_name, (name, mask) in zip(_SCREAMING_SNAKE_CASE , head_masking.items() ):
UpperCamelCase_ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
UpperCamelCase_ = torch.ones(
config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# We check the state of decoder_attentions and cross_attentions just from the last step
UpperCamelCase_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def lowercase ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def lowercase ( self: List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=_SCREAMING_SNAKE_CASE , legacy=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
UpperCamelCase_ = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="pt" , padding=_SCREAMING_SNAKE_CASE ).input_ids
# fmt: off
UpperCamelCase_ = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model.generate(input_ids.to(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
UpperCamelCase_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
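# Illustrative sketch (not part of the tests): the decoder cache check above
# asserts that a full forward pass and an incremental pass with past_key_values
# agree on the newly generated position.
def _sketch_cache_equivalence(model, input_ids, next_tokens, past_key_values):
    full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"]
    step = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
    return torch.allclose(full[:, -1], step[:, 0], atol=1e-3)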
| 328 |
def hamming( n_element: int ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        raise ValueError("n_element should be a positive number" )
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
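# Illustrative alternative (not in the original): the same 2-3-5 smooth-number
# series can be produced with a min-heap instead of three moving indices.
def hamming_heap(n_element: int) -> list:
    import heapq
    heap, seen, series = [1], {1}, []
    while len(series) < n_element:
        value = heapq.heappop(heap)
        series.append(value)
        for factor in (2, 3, 5):
            if value * factor not in seen:
                seen.add(value * factor)
                heapq.heappush(heap, value * factor)
    return series
# hamming_heap(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]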
| 328 | 1 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path ):
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem( fs ):
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename( fs , src , dst ):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock():
    '''simple docstring'''
    if hasattr(fsspec.asyn , "reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
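# Illustrative usage (not in the original): stripping the protocol from a
# remote dataset path before handing it to a filesystem.
#   extract_path_from_uri("s3://my-bucket/my_dataset")  -> "my-bucket/my_dataset"
#   extract_path_from_uri("relative/local/path")        -> "relative/local/path"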
| 57 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        raise NotImplementedError()
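# Illustrative concrete subclass (names invented for the example): the two
# abstract hooks register CLI arguments and execute the command.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--text", type=str, default="hello")

    def run(self):
        print("running the echo command")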
| 19 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowercase : Union[str, Any] = None
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Union[str, Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase : int = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowercase : int = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
lowercase : Optional[int] = '''▁'''
class AlbertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 367 |
import numpy as np
def lowerCAmelCase__ ( vector : np.array ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
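# Quick sanity check (illustrative, not in the original; the mangled function
# name above is kept): 2 / (1 + e^(-2x)) - 1 is algebraically identical to tanh(x).
def _sketch_matches_numpy_tanh() -> bool:
    values = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    return bool(np.allclose(lowerCAmelCase__(values), np.tanh(values)))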
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = '''resnet'''
    layer_types = ['''basic''', '''bottleneck''']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase ( self ) -> float:
'''simple docstring'''
return 1e-3
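# Illustrative usage (not in the original): instantiating the config derives
# one stage name per depth entry plus the stem.
#   cfg = ResNetConfig(depths=[3, 4, 6, 3])
#   cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]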
| 68 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def __magic_name__( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :str ) -> Union[str, Any]:
pass
def _UpperCamelCase ( lowercase__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : str =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
__SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
__SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(lowerCAmelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : int = '''How many cats are there?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
        # No text is detected in this image, meaning that layoutlmv2 should fail
        # and probably return an empty answer.
__SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionnally pass directly the words and bounding boxes
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None.
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None.
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
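
# A minimal end-to-end usage sketch for this pipeline (a sketch only: it needs
# network access plus pytesseract and a vision backend installed; the question
# string is illustrative):
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)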
| 9 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """Swish (SiLU) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
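    # A quick numeric sanity check (added sketch, not part of the original
    # module): sigmoid(0) = 0.5, so swish(0) = 0 * 0.5 = 0.
    assert float(sigmoid(np.array([0.0]))[0]) == 0.5
    assert float(swish(np.array([0.0]))[0]) == 0.0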
| 192 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        # Run the pipeline twice from the same seed: once returning a dict and
        # once a tuple, and check both paths give the same deterministic image.
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 192 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
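
    # For reference, RoPE scaling is enabled purely through the config, e.g.
    # (the factor value here is illustrative, not prescribed by the library):
    #   LlamaModel(LlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0}))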
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 71 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # The checkpoint URLs embed the expected SHA256 digest as the second-to-last
    # path component, so the download can be verified against it.
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # NOTE: the cache directory and the BytesIO round-trip below are
        # assumptions added here so that the raw bytes returned by `_download`
        # can actually be loaded as a checkpoint.
        cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
        model_bytes = _download(_MODELS[checkpoint_path], cache_dir)
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
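
# Example invocation (illustrative; assumes this script is saved locally, e.g.
# as convert_openai_to_hf.py, and that "tiny.en" is one of the _MODELS keys):
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny-en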
| 71 | 1 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """
    Estimate pi by sampling uniformly random points in the 2x2 square centred
    at the origin and counting how many land inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area of the circle to the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo integration: average the function at uniformly random points
    in the interval and scale by the interval width.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator on y = x, whose exact integral is known."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2)."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
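    # Illustrative runs (an added sketch, not part of the original module): by
    # the law of large numbers the error shrinks roughly like 1/sqrt(n).
    pi_estimator(10**5)
    area_under_line_estimator_check(10**5)
    pi_estimator_using_area_under_curve(10**5)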
| 355 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ logging before TF is imported below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 243 | 0 |
def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24n)) / 6 is a positive integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """
    Search pairs of pentagonal numbers P(i), P(j) whose sum and difference are
    both pentagonal, and return the smallest such difference found.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b

    return -1
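
# Why is_pentagonal works (added note): P(n) = n * (3n - 1) / 2, so solving
# 3n^2 - n - 2x = 0 for n gives n = (1 + sqrt(1 + 24x)) / 6, and x is
# pentagonal exactly when that n is a positive integer. For example, x = 22
# gives n = (1 + sqrt(529)) / 6 = 4, and indeed P(4) = 22.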
if __name__ == "__main__":
print(f"{solution() = }")
| 82 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
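
# For reference, the environment this helper inspects looks roughly like the
# following (values are illustrative, not taken from a real SageMaker job):
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'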
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 19 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
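
# Minimal illustration of the deprecation shim (requires network access or a
# local copy of the referenced checkpoint):
#   extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
#   # emits the FutureWarning above, then behaves exactly like CLIPImageProcessor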
| 61 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
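
# Why both versions terminate (added note): a % b < b, so the second argument
# strictly decreases on every step until it reaches 0. Worked example:
#   gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) = 6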
def _UpperCamelCase ( ):
'''simple docstring'''
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 61 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
# fmt: off
_UpperCAmelCase = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad", )
@require_sentencepiece
class a ( unittest.TestCase ):
_snake_case : int = "valhalla/s2t_mustc_multilinguial_medium"
_snake_case : str = "C'est trop cool"
_snake_case : Optional[int] = "Esto es genial"
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] ):
_UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
    def test_lang_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
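For context, a minimal sketch (not the library's actual implementation) of how such target-language prefix tokens are typically derived from a tgt_lang attribute; LANG_CODES below is a hypothetical stand-in for the tokenizer's lang_code_to_id table:

# Hypothetical mapping standing in for lang_code_to_id; ids mirror the asserts above.
LANG_CODES = {"pt": 4, "ru": 6, "it": 9, "de": 11}


class PrefixTokenSketch:
    def __init__(self, tgt_lang):
        self.tgt_lang = tgt_lang

    @property
    def prefix_tokens(self):
        # Every encoded sequence starts with the target-language id.
        return [LANG_CODES[self.tgt_lang]]


assert PrefixTokenSketch("de").prefix_tokens == [11]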
| 289 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
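A hedged sketch of the replicate/shard data-parallel pattern exercised above: parameters get a leading device axis, the batch is split across devices, and a pmapped function runs per shard (shapes here are illustrative only):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4,))}
inputs = jnp.arange(jax.device_count() * 4.0).reshape(jax.device_count(), 4)

p_params = replicate(params)  # adds a leading device axis to every leaf
sharded = shard(inputs)       # reshapes the batch to (num_devices, per_device_batch, ...)


@jax.pmap
def apply(p, x):
    return x * p["w"]


print(apply(p_params, sharded).shape)  # (num_devices, 1, 4)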
| 216 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_find_executable_batch_size(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_find_executable_batch_size_with_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 222 |
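A minimal sketch of what a decorator like find_executable_batch_size does, matching the behavior the tests above assert (halve on out-of-memory, propagate other errors, fail at zero); this is an illustrative reimplementation, not accelerate's actual code:

import functools


def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return f(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" not in str(e):
                        raise  # unrelated errors propagate unchanged
                    batch_size //= 2  # halve and retry
            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    return decorator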
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class SegmentationImageProcessor(BaseImageProcessor):
    # Class name chosen as a placeholder here; the original model-specific name was lost.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation | 222 | 1 |
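A toy-shape walkthrough of the semantic-segmentation post-processing above: per-class logits are upsampled to the target size, then argmax over the class dimension yields one label per pixel (values below are random and purely illustrative):

import torch

logits = torch.randn(1, 3, 8, 8)  # (batch, num_classes, h, w)
resized = torch.nn.functional.interpolate(logits, size=(32, 32), mode="bilinear", align_corners=False)
segmentation_map = resized[0].argmax(dim=0)  # (32, 32) tensor of class ids
print(segmentation_map.shape)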
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[int] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
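A hedged sketch of the lazy-import idea behind _LazyModule (whose real implementation lives in transformers.utils): attributes resolve to their submodule's objects on first access instead of at import time.

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value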
| 101 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and remap its keys to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
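A hedged toy check of the fused-QKV split above: a (3*d, d) weight is cut into three equal (d, d) blocks along dim 0, and concatenating them back reproduces the original tensor (values are illustrative only):

d = 4
fused = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
assert k.shape == v.shape == q.shape == (d, d)
assert torch.equal(torch.cat([k, v, q], dim=0), fused)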
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a fairseq/metaseq OPT checkpoint into a saved HF OPTModel."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 85 | """simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """BartPho tokenizer tests."""

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 85 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 111 | 0 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence and returns the head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node):
    """Prints the list elements in reverse order by recursing to the tail first."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main() | 357 |
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)


class ModalConfig:
    # Name chosen as a placeholder here (the original class name was lost). It wraps a
    # text config and adds modality-specific attributes on top of it.
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels | 58 | 0 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 41 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    """A two-cluster self-organizing map with competitive learning."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning vector j a fraction alpha toward the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 63 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
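A hedged standalone sketch of the chunked newline-delimited JSON strategy above: read a fixed chunk, extend it to the next newline so no record is split, then parse line by line (function name and chunk size chosen here for illustration):

def iter_json_lines(raw: bytes, chunksize: int = 64):
    buffer = io.BytesIO(raw)
    while True:
        batch = buffer.read(chunksize)
        if not batch:
            break
        batch += buffer.readline()  # finish the current line
        for line in batch.splitlines():
            if line.strip():
                yield json.loads(line)


assert [row["a"] for row in iter_json_lines(b'{"a": 1}\n{"a": 2}\n')] == [1, 2]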
| 223 | 0 |
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0

    # factor num - 1 as 2**t * s with s odd
    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
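A small self-check for rabin_miller (the test is probabilistic for large inputs, but these small odd cases behave reliably; the helper name is chosen here for illustration):

def _demo_rabin_miller() -> None:
    for prime in (5, 13, 101, 7919):
        assert rabin_miller(prime)
    for composite in (9, 15):
        assert not rabin_miller(composite)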
def is_prime_low_num(num: int) -> bool:
    """Trial-divide by a table of small primes before falling back to Miller-Rabin."""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Keep sampling random keysize-bit numbers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 77 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Perspective-project a 3D point onto the 2D plane at the given scale and camera distance."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
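A worked check of the perspective projection above: the point (1, 2, 3) with scale=10.0 and distance=10.0 maps to (1*10/13*10, 2*10/13*10) = (100/13, 200/13):

px, py = convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
assert abs(px - 100 / 13) < 1e-9 and abs(py - 200 / 13) < 1e-9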
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }") | 152 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = "3.0.12"
_logger = None
def logger():
    """Returns the logger instance used throughout the module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """Releases the file lock."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
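A minimal usage sketch for the module above (file names chosen for illustration): FileLock resolves to the best available backend, blocks until acquired, and nests safely within one instance.

if __name__ == "__main__":
    lock = FileLock("example.txt.lock", timeout=5)
    with lock:
        # Critical section: only one process holds "example.txt.lock" here.
        with open("example.txt", "a") as f:
            f.write("exclusive write\n")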
| 369 | import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 63 | 0 |
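The streamer tests above all follow the same pattern: run generate() once for a reference string, then run it again through a streamer and compare. A minimal standalone sketch of the iterator variant they exercise, assuming the tiny test checkpoint is reachable from the Hub:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

# generate() runs in a worker thread; the streamer yields decoded text chunks as tokens arrive
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate, kwargs=dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer))
thread.start()
streamed_text = "".join(chunk for chunk in streamer)  # drains until generation finishes
thread.join()
print(streamed_text)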
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def _UpperCamelCase ( self , token_ids_a , token_ids_b=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def _UpperCamelCase ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def _UpperCamelCase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 178 |
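A toy illustration (with hypothetical special-token ids, not tied to any checkpoint) of the sequence-pair layout and token type ids the two overridden methods above produce:

cls_id, sep_id = 101, 102  # hypothetical ids for [CLS] and [SEP]
token_ids_a, token_ids_b = [7, 8], [9]

pair = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
# token_type_ids: 0 over "[CLS] A [SEP]", 1 over "B [SEP]"
type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
assert pair == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]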
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=64 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = vocab_size - 1
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self ) -> int:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self , a , a , a ) -> Optional[int]:
snake_case_ = GPTNeoXModel(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
snake_case_ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a ) -> List[str]:
snake_case_ = True
snake_case_ = GPTNeoXModel(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> int:
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> Optional[int]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForQuestionAnswering(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , a , a , a , a ) -> List[str]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForSequenceClassification(a )
model.to(a )
model.eval()
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , a , a , a , a ) -> str:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForTokenClassification(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , a , a , a ) -> List[Any]:
snake_case_ = True
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
snake_case_ = model(a , attention_mask=a , use_cache=a )
snake_case_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ = model(a , attention_mask=a , output_hidden_states=a )
snake_case_ = output_from_no_past['hidden_states'][0]
snake_case_ = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _UpperCamelCase ( self ) -> str:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCamelCase ( self ) -> str:
snake_case_ = GPTNeoXModelTester(self )
snake_case_ = ConfigTester(self , config_class=a , hidden_size=64 , num_attention_heads=8 )
def _UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> str:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def _UpperCamelCase ( self ) -> List[str]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _UpperCamelCase ( self , a ) -> int:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ids_tensor([1, 10] , config.vocab_size )
snake_case_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = GPTNeoXModel(a )
original_model.to(a )
original_model.eval()
snake_case_ = original_model(a ).last_hidden_state
snake_case_ = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = {'type': scaling_type, 'factor': 10.0}
snake_case_ = GPTNeoXModel(a )
scaled_model.to(a )
scaled_model.eval()
snake_case_ = scaled_model(a ).last_hidden_state
snake_case_ = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
snake_case_ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(a )
snake_case_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(a )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case_ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
snake_case_ = model.generate(**a , do_sample=a , max_new_tokens=20 )
snake_case_ = tokenizer.batch_decode(a )[0]
self.assertEqual(a , a )
| 178 | 1 |
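The parameterized test above builds a second model with {'type': scaling_type, 'factor': 10.0} as its RoPE scaling configuration. A minimal sketch of setting that up directly; the rope_scaling field exists on GPTNeoXConfig in recent transformers releases, which is what the test relies on:

from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(vocab_size=99, hidden_size=64, num_attention_heads=8, num_hidden_layers=2)
config.rope_scaling = {"type": "dynamic", "factor": 10.0}  # or {"type": "linear", "factor": 10.0}
model = GPTNeoXModel(config)
# "dynamic" only rescales RoPE once inputs exceed max_position_embeddings, so short inputs
# match the unscaled model; "linear" rescales positions unconditionally, so they differ.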
from math import sqrt
def is_prime ( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution ( nth: int = 1_00_01 ) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 357 |
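A quick brute-force cross-check of the 6k +/- 1 trial division above, handy when touching is_prime; the naive reference below is only an illustration:

def naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))

assert all(is_prime(n) == naive_is_prime(n) for n in range(-5, 200))
assert solution(6) == 13  # the first six primes are 2, 3, 5, 7, 11, 13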
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ) -> None:
super().__init__(**A_ )
__UpperCamelCase =size if size is not None else {'shortest_edge': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
__UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' )
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =resample
__UpperCamelCase =do_center_crop
__UpperCamelCase =crop_size
__UpperCamelCase =do_rescale
__UpperCamelCase =rescale_factor
__UpperCamelCase =do_normalize
__UpperCamelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase =image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase =do_convert_rgb
def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__UpperCamelCase =get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image:
__UpperCamelCase =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase =size if size is not None else self.size
__UpperCamelCase =get_size_dict(A_ , param_name='size' , default_to_square=A_ )
__UpperCamelCase =resample if resample is not None else self.resample
__UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase =crop_size if crop_size is not None else self.crop_size
__UpperCamelCase =get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ )
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase =image_mean if image_mean is not None else self.image_mean
__UpperCamelCase =image_std if image_std is not None else self.image_std
__UpperCamelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase =make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase =[convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(A_ ) for image in images]
if do_resize:
__UpperCamelCase =[self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
__UpperCamelCase =[self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__UpperCamelCase =[self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
__UpperCamelCase ={'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 117 | 0 |
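For reference, a hand-rolled numpy/PIL sketch of the default transform order the preprocess method above applies (shortest-edge resize, center crop, rescale, normalize); the mean/std values are the OpenAI CLIP constants imported at the top of the module:

import numpy as np
from PIL import Image

OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711)

img = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
# resize so the shortest edge is 224, preserving aspect ratio (bicubic by default in recent Pillow)
w, h = img.size
scale = 224 / min(w, h)
img = img.resize((round(w * scale), round(h * scale)))
# center crop to 224x224
w, h = img.size
left, top = (w - 224) // 2, (h - 224) // 2
img = img.crop((left, top, left + 224, top + 224))
# rescale to [0, 1], normalize, move channels first
x = np.asarray(img, dtype=np.float32) / 255.0
x = (x - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD
x = x.transpose(2, 0, 1)[None]  # shape (1, 3, 224, 224)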
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCamelCase__ = get_sagemaker_input()
else:
UpperCamelCase__ = get_cluster_input()
return config
def __magic_name__ ( __a : Any=None ):
'''simple docstring'''
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser("""config""" , description=A__ )
else:
UpperCamelCase__ = argparse.ArgumentParser("""Accelerate config command""" , description=A__ )
parser.add_argument(
"""--config_file""" , default=A__ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
"""such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
"""with \'huggingface\'."""
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def __magic_name__ ( __a : Tuple ):
'''simple docstring'''
UpperCamelCase__ = get_user_input()
if args.config_file is not None:
UpperCamelCase__ = args.config_file
else:
if not os.path.isdir(A__ ):
os.makedirs(A__ )
UpperCamelCase__ = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(A__ )
else:
config.to_yaml_file(A__ )
print(f"accelerate configuration saved at {config_file}" )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = config_command_parser()
UpperCamelCase__ = parser.parse_args()
config_command(A__ )
if __name__ == "__main__":
main()
| 244 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared ( vector: ndarray ) -> float:
    return np.dot(vector , vector )
class A__ :
    """simple docstring"""
    def __init__( self , *, regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ) -> None:
        '''simple docstring'''
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma , (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear ( self , vector_a: ndarray , vector_b: ndarray ) -> float:
        '''Linear kernel: the plain dot product'''
        return np.dot(vector_a , vector_b )
    def __rbf ( self , vector_a: ndarray , vector_b: ndarray ) -> float:
        '''RBF kernel: exp(-gamma * ||a - b||^2)'''
        return np.exp(-(self.gamma * norm_squared(vector_a - vector_b ) ) )
    def fit ( self , observations: list , classes: ndarray ) -> None:
        '''simple docstring'''
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.regularization >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n ,) = np.shape(classes )
        def to_minimize(candidate: ndarray ) -> float:
            # opposite of the dual objective, so that minimize() maximizes it
            s = 0
            (n ,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_constraint = LinearConstraint(classes , 0 , 0)
        l_bounds = Bounds(0 , self.regularization)
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_constraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j])
        self.offset = s / n
    def predict ( self , observation: ndarray ) -> int:
        '''simple docstring'''
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 | 0 |
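A toy sanity check of the classifier above on four linearly separable points; labels must be +1/-1 for the dual formulation to make sense:

import numpy as np

xs = [np.array([1.0, 1.0]), np.array([2.0, 1.0]), np.array([-1.0, -1.0]), np.array([-2.0, -1.0])]
ys = np.array([1.0, 1.0, -1.0, -1.0])

svc = A__(kernel="linear", regularization=10.0)
svc.fit(xs, ys)
print(svc.predict(np.array([3.0, 2.0])))    # expected: 1
print(svc.predict(np.array([-3.0, -2.0])))  # expected: -1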
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PhobertTokenizer
SCREAMING_SNAKE_CASE_ : str = False
def __A ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
SCREAMING_SNAKE_CASE = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
SCREAMING_SNAKE_CASE = ['#version: 0.2', 'l à</w>']
SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def __A ( self , **lowerCAmelCase__ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ ) -> Dict:
SCREAMING_SNAKE_CASE = 'Tôi là VinAI Research'
SCREAMING_SNAKE_CASE = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
return input_text, output_text
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE = 'Tôi là VinAI Research'
SCREAMING_SNAKE_CASE = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__ )
print(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
| 38 |
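For comparison with the toy vocab built in setUp, a minimal usage sketch against the published checkpoint vinai/phobert-base (the exact pieces produced depend on the checkpoint's BPE merges):

from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
tokens = tokenizer.tokenize("Tôi là sinh_viên")  # continuation pieces carry the @@ marker
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens, ids)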
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : Any = """FlavaImageProcessor"""
SCREAMING_SNAKE_CASE_ : List[str] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[str]:
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(
lowerCAmelCase__ , return_image_mask=lowerCAmelCase__ , return_codebook_pixels=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self ) -> str:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase__ , )
return self.image_processor_class
@property
def __A ( self ) -> Dict:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase__ , )
return self.image_processor
| 38 | 1 |
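A usage sketch of the processor above, assuming the published FLAVA checkpoint facebook/flava-full:

import numpy as np
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.fromarray(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))
batch = processor(text=["a photo"], images=[image], return_tensors="pt")
print(sorted(batch.keys()))  # input_ids/attention_mask from the tokenizer plus pixel_values from the image processor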
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = 'transfo-xl'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['mems']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(self , vocab_size=267735 , cutoffs=[20000, 40000, 200000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings(self ):
        # Message copied from Transformer-XL documentation
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self , value ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.' ) | 206 |
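A small worked example of how the default cutoffs partition the 267,735-token vocabulary into adaptive-softmax clusters (head cluster first, rarest tail last):

vocab_size = 267735
cutoffs = [20000, 40000, 200000]
edges = [0] + cutoffs + [vocab_size]
cluster_sizes = [hi - lo for lo, hi in zip(edges, edges[1:])]
print(cluster_sizes)  # [20000, 20000, 160000, 67735]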
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[Any] = 10
def _a (self ):
A_ : Dict = [1, 2, 3, 4]
A_ : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A_ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A_ : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[str] = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
A_, A_ : Dict = process_story(lowercase )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[int] = """"""
A_, A_ : List[str] = process_story(lowercase )
self.assertEqual(lowercase , [] )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[Any] = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
A_, A_ : int = process_story(lowercase )
A_ : Optional[Any] = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(lowercase , lowercase )
A_ : Dict = ["""It was the best of times."""]
self.assertEqual(lowercase , lowercase )
def _a (self ):
A_ : Optional[int] = torch.tensor([1, 2, 3, 4] )
A_ : Dict = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase , 0 ).numpy() , expected.numpy() )
def _a (self ):
A_ : str = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A_ : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 23 ).numpy() , expected.numpy() )
def _a (self ):
A_ : Any = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A_ : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 1 ).numpy() , expected.numpy() )
def _a (self ):
A_ : List[Any] = 101
A_ : List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A_ : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A_ : Dict = compute_token_type_ids(lowercase , lowercase )
np.testing.assert_array_equal(lowercase , lowercase ) | 206 | 1 |
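The assertions above pin down the behaviour of the small helpers they test. A minimal sketch of implementations consistent with those assertions (not the original utils_summarization source; process_story is omitted):

import torch

def truncate_or_pad(sequence, block_size, pad_token_id):
    # keep the first block_size items, right-pad shorter sequences
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the pad token appears
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask

def compute_token_type_ids(batch, separator_token_id):
    # alternate 1/0 segments; each separator token takes the new segment's id
    segment_ids = []
    for row in batch:
        current, ids = 1, []
        for token in row.tolist():
            if token == separator_token_id:
                current = 1 - current
            ids.append(current)
        segment_ids.append(ids)
    return torch.tensor(segment_ids)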
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A_ ( _a ):
lowerCAmelCase__ = 'time_series_transformer'
lowerCAmelCase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self: List[str] ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: str = "student_t" ,__lowerCAmelCase: str = "nll" ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: List[int] = [1, 2, 3, 4, 5, 6, 7] ,__lowerCAmelCase: Optional[Union[str, bool]] = "mean" ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: Optional[List[int]] = None ,__lowerCAmelCase: Optional[List[int]] = None ,__lowerCAmelCase: int = 32 ,__lowerCAmelCase: int = 32 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: str = "gelu" ,__lowerCAmelCase: int = 64 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 0.02 ,__lowerCAmelCase: Union[str, Any]=True ,**__lowerCAmelCase: int ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = prediction_length
_lowerCamelCase : List[Any] = context_length or prediction_length
_lowerCamelCase : Dict = distribution_output
_lowerCamelCase : List[str] = loss
_lowerCamelCase : Tuple = input_size
_lowerCamelCase : Optional[Any] = num_time_features
_lowerCamelCase : List[str] = lags_sequence
_lowerCamelCase : List[Any] = scaling
_lowerCamelCase : List[str] = num_dynamic_real_features
_lowerCamelCase : Optional[Any] = num_static_real_features
_lowerCamelCase : Optional[int] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_lowerCamelCase : str = cardinality
else:
_lowerCamelCase : Optional[int] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_lowerCamelCase : List[str] = embedding_dimension
else:
_lowerCamelCase : str = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
_lowerCamelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
_lowerCamelCase : Dict = input_size * len(__lowerCAmelCase ) + self._number_of_features
_lowerCamelCase : Optional[int] = d_model
_lowerCamelCase : List[Any] = encoder_attention_heads
_lowerCamelCase : Union[str, Any] = decoder_attention_heads
_lowerCamelCase : Optional[Any] = encoder_ffn_dim
_lowerCamelCase : Optional[Any] = decoder_ffn_dim
_lowerCamelCase : List[str] = encoder_layers
_lowerCamelCase : List[str] = decoder_layers
_lowerCamelCase : Union[str, Any] = dropout
_lowerCamelCase : Optional[Any] = attention_dropout
_lowerCamelCase : Tuple = activation_dropout
_lowerCamelCase : Union[str, Any] = encoder_layerdrop
_lowerCamelCase : str = decoder_layerdrop
_lowerCamelCase : List[str] = activation_function
_lowerCamelCase : Optional[Any] = init_std
_lowerCamelCase : Tuple = use_cache
super().__init__(is_encoder_decoder=__lowerCAmelCase ,**__lowerCAmelCase )
@property
def _lowercase ( self: List[Any] ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 340 |
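A worked example of the encoder feature size implied by _number_of_features and the input-size line in __init__, for a univariate series with the default lags (the time-feature count here is illustrative):

input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
num_time_features = 2        # illustrative
embedding_dimension = []     # no static categorical features
num_dynamic_real_features = 0
num_static_real_features = 0

number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
feature_size = input_size * len(lags_sequence) + number_of_features
print(feature_size)  # 7 lagged values + 4 extra features = 11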
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features ) -> Optional[int]:
    '''simple docstring'''
    batch_size = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
class A_ ( _a ):
def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(
__lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths}
_lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1]
_lowerCamelCase : int = Parquet(
cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
if self.streaming:
_lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,)
_lowerCamelCase : Any = self.builder.as_dataset(
split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class A_ :
def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
_lowerCamelCase : Any = dataset
_lowerCamelCase : Any = path_or_buf
_lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features )
_lowerCamelCase : List[str] = parquet_writer_kwargs
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with open(self.path_or_buf ,"wb+" ) as buffer:
_lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
else:
_lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
return written
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase )
_lowerCamelCase : List[str] = self.dataset.features.arrow_schema
_lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase )
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,):
_lowerCamelCase : List[str] = query_table(
table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
return written | 340 | 1 |
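A standalone sketch of the batched write loop in _write, using pyarrow directly on an illustrative table and path:

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"idx": list(range(1000)), "text": [f"row {i}" for i in range(1000)]})
batch_size = 256

writer = pq.ParquetWriter("example.parquet", schema=table.schema)
written = 0
for offset in range(0, len(table), batch_size):
    batch = table.slice(offset, batch_size)  # zero-copy slice of up to batch_size rows
    writer.write_table(batch)
    written += batch.nbytes
writer.close()
print(written, "bytes of Arrow data written")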
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a__ :
"""simple docstring"""
@staticmethod
def _lowercase ( *UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def _lowercase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
SCREAMING_SNAKE_CASE : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
SCREAMING_SNAKE_CASE : Tuple = image_classifier(UpperCAmelCase__ , candidate_labels=["""a""", """b""", """c"""] )
        # The floating-point scores are so close that we are within floating-point error, so the order is not
        # guaranteed across python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase__ ) , [
[{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """b"""}, {"""score""": 0.3_33, """label""": """c"""}],
[{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """c"""}, {"""score""": 0.3_33, """label""": """b"""}],
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
] , )
@require_tf
def _lowercase ( self : Any ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
SCREAMING_SNAKE_CASE : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
SCREAMING_SNAKE_CASE : List[Any] = image_classifier(UpperCAmelCase__ , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """b"""}, {"""score""": 0.3_33, """label""": """c"""}] , )
SCREAMING_SNAKE_CASE : str = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
[
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
{"""score""": 0.3_33, """label""": ANY(UpperCAmelCase__ )},
],
] , )
@slow
@require_torch
def _lowercase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
SCREAMING_SNAKE_CASE : List[Any] = image_classifier(UpperCAmelCase__ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
] , )
SCREAMING_SNAKE_CASE : int = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
[
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def _lowercase ( self : List[str] ) ->Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_classifier(UpperCAmelCase__ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
] , )
SCREAMING_SNAKE_CASE : Dict = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
[
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
],
]
* 5 , )
| 245 |
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase__ : Tuple = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase__ : Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence ( _A ) -> str:
    _A = re.sub("""<n>""" , """""" , _A )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_A ) )
| 245 | 1 |
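A usage sketch of the helper above (the module downloads nltk's punkt model at import time when nltk is installed):

text = "First sentence. Second sentence.<n> Third one."
print(add_newline_to_end_of_each_sentence(text))
# First sentence.
# Second sentence.
# Third one.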
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : str ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self : List[Any] ) -> Union[str, Any]:
return 12
@property
def __A ( self : Dict ) -> Any:
return 12
@property
def __A ( self : Any ) -> str:
return 32
@property
def __A ( self : Optional[Any] ) -> int:
torch.manual_seed(0 )
__lowerCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __A ( self : Dict ) -> List[str]:
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __A ( self : Dict ) -> Any:
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(_SCREAMING_SNAKE_CASE )
@property
def __A ( self : int ) -> List[str]:
torch.manual_seed(0 )
__lowerCamelCase = 12
__lowerCamelCase = 12
__lowerCamelCase = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
__lowerCamelCase = TransformeraDModel(**_SCREAMING_SNAKE_CASE )
return model
def __A ( self : int ) -> Optional[int]:
__lowerCamelCase = "cpu"
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=_SCREAMING_SNAKE_CASE )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , transformer=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=_SCREAMING_SNAKE_CASE , )
__lowerCamelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCamelCase = "teddy bear playing in the pool"
__lowerCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , return_dict=_SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self : str ) -> Optional[int]:
__lowerCamelCase = "cpu"
__lowerCamelCase = self.dummy_vqvae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_transformer
__lowerCamelCase = VQDiffusionScheduler(self.num_embed )
__lowerCamelCase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_SCREAMING_SNAKE_CASE , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowerCamelCase = VQDiffusionPipeline(
vqvae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , transformer=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=_SCREAMING_SNAKE_CASE , )
__lowerCamelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCamelCase = "teddy bear playing in the pool"
__lowerCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCamelCase = pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCamelCase = pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , return_dict=_SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCamelCase = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : List[str] ) -> List[Any]:
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowerCamelCase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
__lowerCamelCase = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCamelCase = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
__lowerCamelCase = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 364 |
from __future__ import annotations


def slowsort(sequence: list , start: int | None = None , end: int | None = None ) -> None:
    """Sorts sequence[start:end + 1] in place (slowsort, deliberately inefficient)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        # place the larger of the two sub-maxima at the end
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
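    # usage sketch (illustrative values): slowsort sorts in place
    data = [5, 3, 8, 1]
    slowsort(data )
    print(data )  # -> [1, 3, 5, 8]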
| 339 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( FlaxModelTesterMixin , unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input( self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common( self ):
        """simple docstring"""
        init_dict = {
            '''block_out_channels''': [3_2, 6_4],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 72 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline( DiffusionPipeline ):
    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : DDIMScheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 5_0 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
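

# Hedged usage sketch (the checkpoint id is illustrative of a compatible
# VQVAE + UNet + DDIM repo and needs network access; note this module uses
# relative imports, so in practice one would load the class through
# `from diffusers import LDMPipeline`):
if __name__ == "__main__":
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
    image = pipe(batch_size=1 , num_inference_steps=5_0 ).images[0]
    image.save("ldm_sample.png" )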
| 60 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}

SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """token_type_ids"""]
    slow_tokenizer_class = FNetTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        # wrap the mask token so it keeps the space before it
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
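

# Hedged usage sketch: loading the fast tokenizer above through the public
# transformers API (checkpoint id from the maps at the top of this file;
# downloading it requires network access):
if __name__ == "__main__":
    from transformers import FNetTokenizerFast as _FNetTokenizerFast

    tok = _FNetTokenizerFast.from_pretrained("google/fnet-base" )
    print(tok("Hello world" )["input_ids"] )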
| 258 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling( data ):
    # split a sklearn Bunch into (features, target)
    return (data["data"], data["target"])


def xgboost( features , target , test_features ):
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions


def main( ):
    housing = fetch_california_housing()
    data, target = data_handling(housing )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 258 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
    model_type = "convbert"

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 316 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    key: KEY
    val: VAL


class _DeletedItem( _Item ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__(None , None )

    def __bool__( self ):
        '''simple docstring'''
        return False


_deleted = _DeletedItem()


class HashMap( MutableMapping[KEY, VAL] ):
    """Open-addressing hash map with linear probing and lazy deletion."""

    def __init__( self , initial_block_size = 8 , capacity_factor = 0.7_5 ):
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key ):
        '''simple docstring'''
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind ):
        '''simple docstring'''
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind , key , val ):
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ):
        '''simple docstring'''
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ):
        '''simple docstring'''
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size ):
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key ):
        '''simple docstring'''
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key , val ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key , val ):
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ):
        '''simple docstring'''
        return self._len

    def __iter__( self ) -> Iterator[KEY]:
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ):
        '''simple docstring'''
        val_string = ' ,'.join(
            F'{item.key}: {item.val}' for item in self._buckets if item )
        return F'HashMap({val_string})'
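

if __name__ == "__main__":
    # usage sketch (illustrative values): behaves like a dict with resizing
    hm = HashMap()
    hm["a"] = 1
    hm["b"] = 2
    print(hm["a"] , len(hm ) )  # 1 2
    del hm["a"]
    print("a" in hm )           # False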
| 316 | 1 |
'''simple docstring'''


class PrefixSum:
    """simple docstring"""

    def __init__(self , array ):
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self , start , end ):
        # sum of array[start:end + 1] in O(1)
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self , target_sum ):
        # True when some contiguous subarray sums to target_sum
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
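    # usage sketch (illustrative values)
    ps = PrefixSum([1, 2, 3, 4] )
    print(ps.get_sum(1 , 3 ) )    # 9  (2 + 3 + 4)
    print(ps.contains_sum(6 ) )   # True (1 + 2 + 3)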
| 355 |
'''simple docstring'''
def count_divisors( n ):
    # number of divisors via prime factorisation: product of (multiplicity + 1)
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:  # a leftover prime factor contributes a factor of 2
        n_divisors *= 2
    return n_divisors


def solution( ):
    # Project Euler 12: first triangle number with more than 500 divisors
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_0_0:
            break
    return t_num
if __name__ == "__main__":
print(solution())
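    # sanity check (illustrative): 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors
    assert count_divisors(28 ) == 6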
| 9 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowerCAmelCase_ ( PoolFormerImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 150 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. "
b"\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_globals["_TRAINERSPEC"]._serialized_start = 45
_globals["_TRAINERSPEC"]._serialized_end = 1_581
_globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1_517
_globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1_570
_globals["_NORMALIZERSPEC"]._serialized_start = 1_584
_globals["_NORMALIZERSPEC"]._serialized_end = 1_793
_globals["_SELFTESTDATA"]._serialized_start = 1_795
_globals["_SELFTESTDATA"]._serialized_end = 1_916
_globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1_864
_globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1_905
_globals["_MODELPROTO"]._serialized_start = 1_919
_globals["_MODELPROTO"]._serialized_end = 2_429
_globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2_208
_globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2_418
_globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2_323
_globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2_407
# @@protoc_insertion_point(module_scope)
| 150 | 1 |
from statistics import mean, stdev


def normalization( data , ndigits = 3 ):
    # min-max rescaling of data into [0, 1], rounded to ndigits
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]


def standardization( data , ndigits = 3 ):
    # z-score standardization (zero mean, unit sample stdev), rounded to ndigits
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / sigma , ndigits ) for x in data]
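

if __name__ == "__main__":
    # usage sketch (illustrative values)
    print(normalization([2.0, 4.0, 6.0] ) )    # [0.0, 0.5, 1.0]
    print(standardization([2.0, 4.0, 6.0] ) )  # [-1.0, 0.0, 1.0]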
| 47 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__A ={
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 | 1 |
from __future__ import annotations
import numpy as np
def relu( vector: list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 50 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 170 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Union[str, Any] = IFInpaintingPipeline
snake_case__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
snake_case__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def test_save_load_optional_components( self ):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_floataa( self ):
        """simple docstring"""
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def test_save_load_local( self ):
        """simple docstring"""
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 32 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace( s , old , new , occurrence ):
    '''simple docstring'''
    # replace the last `occurrence` matches of `old` in `s` with `new`
    li = s.rsplit(old , occurrence )
    return new.join(li )


def count_parameters( state_dict ):
    '''simple docstring'''
    # skip the shared encoder.embeddings when counting
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )


def upgrade_state_dict( state_dict ):
    '''simple docstring'''
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.' , f'{group_key}.group.' )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        if key.endswith(""".w""" ):
            key = rreplace(key , """.w""" , """.weight""" , 1 )
        if key.endswith(""".b""" ):
            key = rreplace(key , """.b""" , """.bias""" , 1 )
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    '''simple docstring'''
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
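
# Hedged CLI usage sketch (script filename and paths are illustrative; the
# checkpoint can be a local file or a URL, per the branch in the converter):
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-codebook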
| 32 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase__( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] )->List[Any]:
A__ = old_name
if "patch_embed" in old_name:
A__ , A__ , A__ = old_name.split('''.''' )
if layer == "0":
A__ = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
A__ = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
A__ = old_name.replace('''3''' , '''convolution2''' )
else:
A__ = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(r'''\d\.\d''' , UpperCamelCase__ ):
A__ = r'''\b\d{2}\b'''
if bool(re.search(UpperCamelCase__ , UpperCamelCase__ ) ):
A__ = re.search(r'''\d\.\d\d.''' , UpperCamelCase__ ).group()
else:
A__ = re.search(r'''\d\.\d.''' , UpperCamelCase__ ).group()
if int(match[0] ) < 6:
A__ = old_name.replace(UpperCamelCase__ , '''''' )
A__ = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
A__ = '''intermediate_stages.''' + trimmed_name
else:
A__ = old_name.replace(UpperCamelCase__ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
A__ = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
A__ = str(int(match[2] ) - num_meta4D_last_stage )
A__ = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
A__ = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
A__ = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
A__ = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
A__ = trimmed_name.replace('''fc2''' , '''linear_out''' )
A__ = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''' , UpperCamelCase__ ):
A__ = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
A__ = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A__ = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A__ = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
A__ = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
A__ = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
A__ = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
A__ = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A__ = new_name.replace('''norm''' , '''layernorm''' )
A__ = '''efficientformer.''' + new_name
else:
A__ = '''efficientformer.encoder.''' + new_name
return new_name
def UpperCamelCase__( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] )->Dict:
for key in checkpoint.copy().keys():
A__ = checkpoint.pop(UpperCamelCase__ )
A__ = val
return checkpoint
def UpperCamelCase__( )->Union[str, Any]:
A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return image
def UpperCamelCase__( UpperCamelCase__ : Path , UpperCamelCase__ : Path , UpperCamelCase__ : Path , UpperCamelCase__ : bool )->Optional[int]:
A__ = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model''']
A__ = EfficientFormerConfig.from_json_file(UpperCamelCase__ )
A__ = EfficientFormerForImageClassificationWithTeacher(UpperCamelCase__ )
A__ = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
A__ = config.depths[-1] - config.num_metaad_blocks + 1
A__ = convert_torch_checkpoint(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
A__ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
A__ = prepare_img()
A__ = 2_56
A__ = 2_24
A__ = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
A__ = processor(images=UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
A__ = Compose(
[
Resize(UpperCamelCase__ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(UpperCamelCase__ ),
ToTensor(),
Normalize(UpperCamelCase__ , UpperCamelCase__ ),
] )
A__ = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
A__ = model(UpperCamelCase__ )
A__ = outputs.logits
A__ = (1, 10_00)
if "l1" in model_name:
A__ = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , UpperCamelCase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A__ = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , UpperCamelCase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A__ = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )
# Save Checkpoints
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
processor.save_pretrained(UpperCamelCase__ )
print(f"Processor successfuly saved at {pytorch_dump_path}" )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f"Bearnardd/{pytorch_dump_path}" , commit_message='''Add model''' , use_temp_dir=UpperCamelCase__ , )
processor.push_to_hub(
repo_id=f"Bearnardd/{pytorch_dump_path}" , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
a__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
a__: Tuple = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
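
    # Hedged CLI usage sketch (script filename and paths are illustrative; the
    # checkpoint stem must contain l1, l3 or l7 so the expected-logits checks
    # above have a matching branch):
    #   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
    #       --pytorch_model_path weights/efficientformer_l1_300d.pth \
    #       --config_file configs/efficientformer_l1.json \
    #       --pytorch_dump_path efficientformer-l1-300 \
    #       --no-push_to_hub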
| 193 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase__( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple=0.999 , UpperCamelCase__ : Any="cosine" , )->List[str]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase__ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase__ : str ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
A__ = []
for i in range(UpperCamelCase__ ):
A__ = i / num_diffusion_timesteps
A__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) )
return torch.tensor(UpperCamelCase__ , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = [e.name for e in KarrasDiffusionSchedulers]
__SCREAMING_SNAKE_CASE = 2
@register_to_config
def __init__( self,__lowerCamelCase = 1000,__lowerCamelCase = 0.00085,__lowerCamelCase = 0.012,__lowerCamelCase = "linear",__lowerCamelCase = None,__lowerCamelCase = "epsilon",__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = 1.0,__lowerCamelCase = "linspace",__lowerCamelCase = 0,):
if trained_betas is not None:
A__ = torch.tensor(__lowerCamelCase,dtype=torch.floataa )
elif beta_schedule == "linear":
A__ = torch.linspace(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A__ = (
torch.linspace(beta_start**0.5,beta_end**0.5,__lowerCamelCase,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
A__ = 1.0 - self.betas
A__ = torch.cumprod(self.alphas,dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
A__ = use_karras_sigmas
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ):
if schedule_timesteps is None:
A__ = self.timesteps
A__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A__ = 1 if len(__lowerCamelCase ) > 1 else 0
else:
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
A__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,):
A__ = self.index_for_timestep(__lowerCamelCase )
A__ = self.sigmas[step_index]
A__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = None,):
A__ = num_inference_steps
A__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A__ = np.linspace(0,num_train_timesteps - 1,__lowerCamelCase,dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(0,__lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(__lowerCamelCase,0,-step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
A__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A__ = np.log(__lowerCamelCase )
A__ = np.interp(__lowerCamelCase,np.arange(0,len(__lowerCamelCase ) ),__lowerCamelCase )
if self.config.use_karras_sigmas:
A__ = self._convert_to_karras(in_sigmas=__lowerCamelCase,num_inference_steps=self.num_inference_steps )
A__ = np.array([self._sigma_to_t(__lowerCamelCase,__lowerCamelCase ) for sigma in sigmas] )
A__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A__ = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
A__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A__ = torch.from_numpy(__lowerCamelCase )
A__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__lowerCamelCase ).startswith('''mps''' ):
# mps does not support float64
A__ = timesteps.to(__lowerCamelCase,dtype=torch.floataa )
else:
A__ = timesteps.to(device=__lowerCamelCase )
# empty dt and derivative
A__ = None
A__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A__ = defaultdict(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
# get log sigma
A__ = np.log(__lowerCamelCase )
# get distribution
A__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A__ = np.cumsum((dists >= 0),axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A__ = low_idx + 1
A__ = log_sigmas[low_idx]
A__ = log_sigmas[high_idx]
# interpolate sigmas
A__ = (low - log_sigma) / (low - high)
A__ = np.clip(__lowerCamelCase,0,1 )
# transform interpolation to time range
A__ = (1 - w) * low_idx + w * high_idx
A__ = t.reshape(sigma.shape )
return t
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = in_sigmas[-1].item()
A__ = in_sigmas[0].item()
A__ = 7.0 # 7.0 is the value used in the paper
A__ = np.linspace(0,1,__lowerCamelCase )
A__ = sigma_min ** (1 / rho)
A__ = sigma_max ** (1 / rho)
A__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase ( self ):
return self.dt is None
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = True,):
A__ = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A__ = self.sigmas[step_index]
A__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A__ = self.sigmas[step_index - 1]
A__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A__ = 0
A__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A__ = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
A__ = pred_original_sample.clamp(
-self.config.clip_sample_range,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A__ = sigma_next - sigma_hat
# store for 2nd order step
A__ = derivative
A__ = dt
A__ = sample
else:
# 2. 2nd order / Heun's method
A__ = (sample - pred_original_sample) / sigma_next
A__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A__ = self.dt
A__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A__ = None
A__ = None
A__ = None
A__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A__ = self.sigmas.to(device=original_samples.device,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
A__ = self.timesteps.to(original_samples.device,dtype=torch.floataa )
A__ = timesteps.to(original_samples.device,dtype=torch.floataa )
else:
A__ = self.timesteps.to(original_samples.device )
A__ = timesteps.to(original_samples.device )
A__ = [self.index_for_timestep(__lowerCamelCase,__lowerCamelCase ) for t in timesteps]
A__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A__ = sigma.unsqueeze(-1 )
A__ = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
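

# Hedged usage sketch: the scheduler above matches diffusers' HeunDiscreteScheduler;
# the standalone example below assumes that public export.
if __name__ == "__main__":
    from diffusers import HeunDiscreteScheduler

    sched = HeunDiscreteScheduler(num_train_timesteps=1000 , beta_schedule="linear" )
    sched.set_timesteps(num_inference_steps=25 )
    print(sched.timesteps.shape )  # Heun interleaves steps: 2 * 25 - 1 = 49 entries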
| 193 | 1 |
'''simple docstring'''
import random


def rabin_miller( num: int ) -> bool:
    # write num - 1 as 2**t * s with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num( num: int ) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def generate_large_prime( keysize: int = 1_0_2_4 ) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print('''Prime number:''' , num )
    print('''is_prime_low_num:''' , is_prime_low_num(num ) )
| 334 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu( vector: list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 334 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_A : int = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( CLIPImageProcessor ):
    def __init__( self , *args , **kwargs ) ->None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 142 |
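# The same deprecation-shim pattern, reduced to its essentials. A sketch with
# made-up names; `NewThing`/`OldThing` are not real transformers classes.
import warnings

class NewThing:
    def __init__(self, value=0):
        self.value = value

class OldThing(NewThing):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldThing is deprecated; use NewThing instead.", FutureWarning)
        super().__init__(*args, **kwargs)

OldThing()  # emits a FutureWarning but behaves exactly like NewThing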
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup ( params , i , prefix ):
    """simple docstring"""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : str = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
lowerCamelCase__ : Union[str, Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase__ : Dict = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
lowerCamelCase__ : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase__ : int = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
lowerCamelCase__ : str = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase__ : Any = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
lowerCamelCase__ : Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def t5x_mlp_lookup ( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup ( params , i , prefix , layer_name ):
    """simple docstring"""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch ( variables , * , num_layers , is_encoder_only , scalable_attention = False ):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old , i , '''encoder''' , '''pre_attention_layer_norm''' )
        k , o , q , v = t5x_attention_lookup(old , i , '''encoder''' , '''attention''' )
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old , i , '''encoder''' , '''pre_mlp_layer_norm''' )
        wi , wo = t5x_mlp_lookup(old , i , '''encoder''' , split_mlp_wi )
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old , i , '''encoder''' ).T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old , 0 , '''encoder''' ).T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old , 0 , '''decoder''' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_self_attention_layer_norm''' )
            k , o , q , v = t5x_attention_lookup(old , i , '''decoder''' , '''self_attention''' )
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_cross_attention_layer_norm''' )
            k , o , q , v = t5x_attention_lookup(old , i , '''decoder''' , '''encoder_decoder_attention''' )
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_mlp_layer_norm''' )
            wi , wo = t5x_mlp_lookup(old , i , '''decoder''' , split_mlp_wi )
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old , i , '''decoder''' ).T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict ( converted_params , is_encoder_only ):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_t5x_weights_in_t5 ( model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention ):
    """simple docstring"""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    converted = convert_t5x_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_t5x_checkpoint_to_pytorch ( t5x_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ):
    """simple docstring"""
    config = MT5Config.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 142 | 1 |
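# The reshape+transpose idea in the lookups above, isolated: a T5X attention
# kernel is stored as (d_model, n_heads, d_head), while a PyTorch Linear weight
# is 2-D with the output axis first, so the head axes are folded and the result
# transposed. A sketch with made-up shapes.
import numpy as np

d_model, n_heads, d_head = 8, 2, 4
flax_kernel = np.random.rand(d_model, n_heads, d_head)   # (in, heads, head_dim)
folded = flax_kernel.reshape(d_model, n_heads * d_head)  # (in, out)
torch_weight = folded.T                                   # (out, in), PyTorch convention
assert torch_weight.shape == (n_heads * d_head, d_model)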
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file ():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
class lowerCAmelCase__ ( TestCasePlus ):
    def setup( self ) ->None:
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ):
        '''simple docstring'''
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.6_6_6 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
        '''simple docstring'''
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args )
        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args )
        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args )
| 365 |
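# The core trick in run_and_check above: temporarily replace sys.argv so a
# script's argparse-based main() sees injected arguments. Minimal sketch with a
# made-up fake_main().
import argparse
import sys
from unittest.mock import patch

def fake_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float)
    return parser.parse_args().lr

with patch.object(sys, "argv", ["prog", "--lr", "2e-4"]):
    assert fake_main() == 2e-4  # main() parsed the injected argv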
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess (image ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert("RGB" ) ) for img in image]
    image = torch.stack(image )
    return image
class lowerCAmelCase__ ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
        '''simple docstring'''
        if strength < 0 or strength > 1:
            raise ValueError(F"""The value of strength should be in [0.0, 1.0] but is {strength}""" )
    def get_timesteps( self , num_inference_steps , strength , device ):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        '''simple docstring'''
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print("add noise to latents at timestep" , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image : Union[torch.FloatTensor, PIL.Image.Image] = None , strength : float = 0.8 , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
| 322 | 0 |
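# How `strength` maps to a truncated schedule in get_timesteps above: with 50
# inference steps and strength 0.75, the first quarter of the schedule is
# skipped and denoising starts from a partially noised latent. Pure-Python sketch.
num_inference_steps, strength = 50, 0.75
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 37
t_start = max(num_inference_steps - init_timestep, 0)                          # 13
timesteps = list(range(num_inference_steps))[t_start:]                         # 37 steps remain
print(t_start, len(timesteps))  # 13 37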
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _A ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = VQModel
    main_input_name = "sample"
    @property
    def dummy_input( self , sizes=(32, 32) ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
    @property
    def input_shape( self ):
        '''simple docstring'''
        return (3, 32, 32)
    @property
    def output_shape( self ):
        '''simple docstring'''
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self ):
        '''simple docstring'''
        pass
    def test_training( self ):
        '''simple docstring'''
        pass
    def test_from_pretrained_hub( self ):
        '''simple docstring'''
        model , loading_info = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ):
        '''simple docstring'''
        model = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
| 254 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timesformer'''] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 254 | 1 |
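# The `_LazyModule` idea in miniature: defer a submodule import until the
# attribute is first touched, via module-level __getattr__ (PEP 562).
# Hypothetical module names; this is not the transformers implementation.
import importlib

_import_structure = {"heavy_submodule": ["ExpensiveClass"]}

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")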
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser ( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
    pod_args.add_argument(
        '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
    pod_args.add_argument(
        '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
    pod_args.add_argument(
        '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
    pod_args.add_argument(
        '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher ( args ):
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"""Running {' '.join(cmd )}""" )
        return
    subprocess.run(cmd )
    print('Successfully setup pod.' )
def main ():
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
| 369 |
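# The subcommand-dispatch pattern used above, reduced to a sketch: each
# subparser stores its handler with set_defaults(func=...), and the caller just
# invokes args.func(args). Names below are illustrative.
import argparse

def greet(args):
    print(f"hello {args.name}")

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
greet_parser = subparsers.add_parser("greet")
greet_parser.add_argument("--name", default="world")
greet_parser.set_defaults(func=greet)

args = parser.parse_args(["greet", "--name", "tpu"])
args.func(args)  # prints "hello tpu"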
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , ToolTesterMixin ):
    """simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tool = load_tool('text-to-speech' )
        self.tool.setup()
    def test_exact_match_arg( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
    def test_exact_match_kwarg( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 249 | 0 |
"""simple docstring"""
import os
def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , '''triangle.txt''' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' ''' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 91 |
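# The same maximum-path problem solved top-down with memoization, to contrast
# with the in-place bottom-up pass above. Sketch only; `rows` is a toy triangle.
from functools import lru_cache

rows = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]  # classic example, answer 23

@lru_cache(maxsize=None)
def best(i: int, j: int) -> int:
    if i == len(rows) - 1:
        return rows[i][j]
    return rows[i][j] + max(best(i + 1, j), best(i + 1, j + 1))

print(best(0, 0))  # 23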
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCAmelCase ( self : int , __a : List[Any]=0 ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(__a ) )
__lowercase : Any = np.random.RandomState(__a )
__lowercase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Optional[Any] = self.get_dummy_inputs()
__lowercase : str = pipe(**__a ).images
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowercase : List[Any] = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
__lowercase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Dict = self.get_dummy_inputs()
__lowercase : Optional[int] = pipe(**__a ).images
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
# warmup pass to apply optimizations
__lowercase : Optional[int] = pipe(**self.get_dummy_inputs() )
__lowercase : Dict = self.get_dummy_inputs()
__lowercase : str = pipe(**__a ).images
__lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Any = self.get_dummy_inputs()
__lowercase : int = pipe(**__a ).images
__lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Optional[int] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Tuple = self.get_dummy_inputs()
__lowercase : Union[str, Any] = pipe(**__a ).images
__lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Union[str, Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
__lowercase : str = self.get_dummy_inputs()
__lowercase : Dict = pipe(**__a ).images
__lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Dict = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = ort.SessionOptions()
__lowercase : Dict = False
return options
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowercase : Union[str, Any] = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowercase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a )
__lowercase : int = """A fantasy landscape, trending on artstation"""
__lowercase : int = np.random.RandomState(0 )
__lowercase : List[str] = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type="""np""" , )
__lowercase : Any = output.images
__lowercase : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowercase : Optional[int] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowercase : Any = init_image.resize((768, 512) )
__lowercase : int = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Union[str, Any] = """A fantasy landscape, trending on artstation"""
__lowercase : Any = np.random.RandomState(0 )
__lowercase : Optional[Any] = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__a , output_type="""np""" , )
__lowercase : str = output.images
__lowercase : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowercase : Union[str, Any] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 | 233 | 0 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __lowercase ( object ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=1 , ):
__a : Dict = parent
__a : str = batch_size
__a : Union[str, Any] = seq_length
__a : Any = is_training
__a : int = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : int = use_labels
__a : int = vocab_size
__a : int = hidden_size
__a : str = num_hidden_layers
__a : str = num_attention_heads
__a : Any = intermediate_size
__a : Union[str, Any] = hidden_act
__a : Optional[int] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Union[str, Any] = type_vocab_size
__a : List[str] = type_sequence_label_size
__a : List[str] = initializer_range
__a : Optional[int] = num_labels
__a : List[str] = num_choices
__a : int = scope
__a : Union[str, Any] = q_groups
__a : Dict = k_groups
__a : List[str] = v_groups
__a : Any = post_attention_groups
__a : Optional[int] = intermediate_groups
__a : List[str] = output_groups
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Optional[Any] = None
if self.use_input_mask:
__a : int = random_attention_mask([self.batch_size, self.seq_length] )
__a : List[str] = None
__a : Union[str, Any] = None
__a : int = None
if self.use_labels:
__a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : Dict = ids_tensor([self.batch_size] , self.num_choices )
__a : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = SqueezeBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Optional[Any] = model(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = SqueezeBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : int = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Optional[int] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Any = self.num_labels
__a : List[Any] = SqueezeBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = self.num_labels
__a : List[str] = SqueezeBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = self.num_choices
__a : Union[str, Any] = SqueezeBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Union[str, Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Optional[Any] = config_and_inputs
__a : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__lowerCAmelCase = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : Union[str, Any] = SqueezeBertModelTester(self )
__a : Dict = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Any = SqueezeBertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
__a : int = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
__a : Tuple = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
__a : List[str] = model(_UpperCAmelCase )[0]
__a : int = torch.Size((1, 3) )
self.assertEqual(output.shape , _UpperCAmelCase )
__a : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
        self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-4 ) )
| 188 |
"""simple docstring"""
def dodecahedron_surface_area( edge: float) -> float:
    if edge <= 0 or not isinstance(edge , (int, float)):
        raise ValueError('''Length must be a positive.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge: float) -> float:
    if edge <= 0 or not isinstance(edge , (int, float)):
        raise ValueError('''Length must be a positive.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 188 | 1 |
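# Quick property check of the closed forms above: surface area scales with
# edge**2 and volume with edge**3, so doubling the edge must multiply them by
# 4 and 8. Standalone sketch against the same formulas.
import math

surface = lambda e: 3 * math.sqrt(25 + 10 * math.sqrt(5)) * e**2
volume = lambda e: (15 + 7 * math.sqrt(5)) / 4 * e**3

assert math.isclose(surface(2.0), 4 * surface(1.0))
assert math.isclose(volume(2.0), 8 * volume(1.0))
print(surface(1.0), volume(1.0))  # ~20.646, ~7.663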
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : List[str] , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = jnp.ones((batch_size, length)) / length
return scores
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = None
UpperCAmelCase_ = 20
UpperCAmelCase_ = self._get_uniform_logits(batch_size=2 , length=_snake_case)
# tweak scores to not be uniform anymore
UpperCAmelCase_ = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
UpperCAmelCase_ = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
UpperCAmelCase_ = jax.nn.softmax(_snake_case , axis=-1)
UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5)
UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3)
UpperCAmelCase_ = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case) , axis=-1)
UpperCAmelCase_ = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = None
UpperCAmelCase_ = 10
UpperCAmelCase_ = 2
# create ramp distribution
UpperCAmelCase_ = np.broadcast_to(np.arange(_snake_case)[None, :] , (batch_size, vocab_size)).copy()
UpperCAmelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase_ = FlaxTopKLogitsWarper(3)
UpperCAmelCase_ = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
UpperCAmelCase_ = 5
UpperCAmelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
UpperCAmelCase_ = np.broadcast_to(np.arange(_snake_case)[None, :] , (batch_size, length)).copy()
UpperCAmelCase_ = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = None
UpperCAmelCase_ = 10
UpperCAmelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]]))
UpperCAmelCase_ = FlaxTopPLogitsWarper(0.8)
UpperCAmelCase_ = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]])
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1e-3))
# check edge cases with negative and extreme logits
UpperCAmelCase_ = np.broadcast_to(np.arange(_snake_case)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
UpperCAmelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
UpperCAmelCase_ = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = 20
UpperCAmelCase_ = 4
UpperCAmelCase_ = 0
UpperCAmelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case)
# check that min length is applied at length 5
UpperCAmelCase_ = ids_tensor((batch_size, 20) , vocab_size=20)
UpperCAmelCase_ = 5
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''')])
# check that min length is not applied anymore at length 15
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = 15
UpperCAmelCase_ = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case)
self.assertFalse(jnp.isinf(_snake_case).any())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 20
UpperCAmelCase_ = 4
UpperCAmelCase_ = 0
UpperCAmelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case)
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase_ = ids_tensor((batch_size, 1) , vocab_size=20)
UpperCAmelCase_ = 1
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = logits_processor(_snake_case , _snake_case , cur_len=_snake_case)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = logits_processor(_snake_case , _snake_case , cur_len=_snake_case)
self.assertFalse(jnp.isinf(_snake_case).any())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 20
UpperCAmelCase_ = 4
UpperCAmelCase_ = 0
UpperCAmelCase_ = 5
UpperCAmelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case)
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase_ = ids_tensor((batch_size, 4) , vocab_size=20)
UpperCAmelCase_ = 4
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = logits_processor(_snake_case , _snake_case , cur_len=_snake_case)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase_ = 3
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = logits_processor(_snake_case , _snake_case , cur_len=_snake_case)
self.assertFalse(jnp.isinf(_snake_case).any())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 10
UpperCAmelCase_ = 15
UpperCAmelCase_ = 2
UpperCAmelCase_ = 1
UpperCAmelCase_ = 15
# dummy input_ids and scores
UpperCAmelCase_ = ids_tensor((batch_size, sequence_length) , _snake_case)
UpperCAmelCase_ = input_ids.copy()
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = scores.copy()
# instantiate all dist processors
UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5)
UpperCAmelCase_ = FlaxTopKLogitsWarper(3)
UpperCAmelCase_ = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
UpperCAmelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case)
UpperCAmelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case)
UpperCAmelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case)
UpperCAmelCase_ = 10
# no processor list
UpperCAmelCase_ = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case)
# with processor list
UpperCAmelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
UpperCAmelCase_ = processor(_snake_case , _snake_case , cur_len=_snake_case)
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 10
UpperCAmelCase_ = 15
UpperCAmelCase_ = 2
UpperCAmelCase_ = 1
UpperCAmelCase_ = 15
# dummy input_ids and scores
UpperCAmelCase_ = ids_tensor((batch_size, sequence_length) , _snake_case)
UpperCAmelCase_ = input_ids.copy()
UpperCAmelCase_ = self._get_uniform_logits(_snake_case , _snake_case)
UpperCAmelCase_ = scores.copy()
# instantiate all dist processors
UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5)
UpperCAmelCase_ = FlaxTopKLogitsWarper(3)
UpperCAmelCase_ = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
UpperCAmelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case)
UpperCAmelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case)
UpperCAmelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case)
UpperCAmelCase_ = 10
# no processor list
def run_no_processor_list(_snake_case : int , _snake_case : Union[str, Any] , _snake_case : List[str]):
UpperCAmelCase_ = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case)
UpperCAmelCase_ = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case)
return scores
# with processor list
def run_processor_list(_snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any]):
UpperCAmelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
UpperCAmelCase_ = processor(_snake_case , _snake_case , cur_len=_snake_case)
return scores
UpperCAmelCase_ = jax.jit(_snake_case)
UpperCAmelCase_ = jax.jit(_snake_case)
UpperCAmelCase_ = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = jitted_run_processor_list(_snake_case , _snake_case , _snake_case)
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 51 |
'''simple docstring'''
import sys
a_ : Dict = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def _A (lowerCAmelCase__ :str = N ) -> int:
'''simple docstring'''
_a = -sys.maxsize - 1
for i in range(len(lowerCAmelCase__ ) - 12 ):
_a = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_a = product
return largest_product
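# Sketch of the window arithmetic above (illustrative values, not the
# 1000-digit constant): for a digit string of length L and a window of 13
# there are L - 12 starting positions, and each candidate is the product
# int(n[i]) * ... * int(n[i + 12]). With "39878" and a window of 2 the
# candidates would be 27, 72, 56, 56, so the maximum is 72.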
if __name__ == "__main__":
print(f'''{solution() = }''')
| 168 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = "roberta"
def __init__(self : Union[str, Any] , __UpperCAmelCase : Union[str, Any]=5_0_2_6_5 , __UpperCAmelCase : Dict=7_6_8 , __UpperCAmelCase : Any=1_2 , __UpperCAmelCase : Tuple=1_2 , __UpperCAmelCase : Tuple=3_0_7_2 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Any=5_1_2 , __UpperCAmelCase : str=2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : List[Any]=1E-12 , __UpperCAmelCase : str=1 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : List[Any]="absolute" , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = classifier_dropout
class A ( UpperCAmelCase_ ):
@property
def lowercase_ (self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
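# Illustrative result of the property above for the default
# (non-multiple-choice) task:
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"})])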
| 353 | import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=__A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=__A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=__A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=__A, default=1_000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=__A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=__A, type=__A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=__A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=__A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
UpperCAmelCase__ = parser.parse_args()
return args
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
def fn(__A ):
return tokenizer(examples["text"] )
return fn
def lowerCAmelCase_ ( __A ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = []
for i in range(len(tokenized_data["input_ids"] ) ):
UpperCAmelCase__ = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
UpperCAmelCase__ = tf.train.Features(feature=__A )
UpperCAmelCase__ = tf.train.Example(features=__A )
UpperCAmelCase__ = example.SerializeToString()
records.append(__A )
return records
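# Read-back sketch (not part of this script; names are illustrative). The
# serialized examples above could be recovered with a matching feature spec:
#   feature_spec = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   parsed = tf.data.TFRecordDataset(record_path).map(
#       lambda record: tf.io.parse_single_example(record, feature_spec)
#   )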
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
UpperCAmelCase__ = min(len(__A ), args.limit )
UpperCAmelCase__ = dataset.select(range(__A ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase__ = os.path.join(args.output_dir, args.split )
if not os.path.exists(__A ):
os.makedirs(__A )
else:
UpperCAmelCase__ = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase__ = tokenize_function(__A )
UpperCAmelCase__ = dataset.map(__A, batched=__A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__A ):
# Concatenate all texts.
UpperCAmelCase__ = {k: sum(examples[k], [] ) for k in examples.keys()}
UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase__ = {
k: [t[i : i + args.max_length] for i in range(0, __A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
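# Worked example for group_texts (hypothetical numbers): with
# args.max_length = 4 and a batch whose concatenated "input_ids" are
# [1, 2, ..., 10], total_length is 10, which truncates to 8, and the result is
# {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]}; the trailing [9, 10] is dropped.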
UpperCAmelCase__ = dataset_tokenized.map(__A, batched=__A, batch_size=1_000, num_proc=4 )
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
for shard in range(0, len(__A ), args.shard_size ):
UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase__ = len(dataset_snapshot["input_ids"] )
UpperCAmelCase__ = os.path.join(__A, f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
UpperCAmelCase__ = get_serialized_examples(__A )
with tf.io.TFRecordWriter(__A ) as out_file:
for i in range(len(__A ) ):
UpperCAmelCase__ = serialized_examples[i]
out_file.write(__A )
print("Wrote file {} containing {} records".format(__A, __A ) )
shard_count += 1
total_records += records_containing
with open(f"""split-{args.split}-records-count.txt""", "w" ) as f:
print(f"""Total {args.split} records: {total_records}""", file=__A )
if __name__ == "__main__":
UpperCamelCase__ = parse_args()
main(args)
| 143 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase__ ( snake_case__ ):
def __init__( self : Optional[Any] ):
lowerCamelCase_ : Union[str, Any] =[]
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple , **snake_case__ : List[str] ):
self.events.append("on_init_end" )
def UpperCAmelCase__ ( self : Any , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[Any] , **snake_case__ : int ):
self.events.append("on_train_begin" )
def UpperCAmelCase__ ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : str , **snake_case__ : List[str] ):
self.events.append("on_train_end" )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , **snake_case__ : int ):
self.events.append("on_epoch_begin" )
def UpperCAmelCase__ ( self : str , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Any , **snake_case__ : Optional[Any] ):
self.events.append("on_epoch_end" )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Dict , **snake_case__ : Optional[Any] ):
self.events.append("on_step_begin" )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : List[str] , **snake_case__ : int ):
self.events.append("on_step_end" )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : List[str] , **snake_case__ : Any ):
self.events.append("on_evaluate" )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , **snake_case__ : List[str] ):
self.events.append("on_predict" )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : str , **snake_case__ : List[str] ):
self.events.append("on_save" )
def UpperCAmelCase__ ( self : str , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Tuple , **snake_case__ : List[str] ):
self.events.append("on_log" )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , **snake_case__ : Dict ):
self.events.append("on_prediction_step" )
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Dict =tempfile.mkdtemp()
def UpperCAmelCase__ ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : int=0 , snake_case__ : Optional[Any]=0 , snake_case__ : Optional[int]=64 , snake_case__ : List[Any]=64 , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=False , **snake_case__ : Dict ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
lowerCamelCase_ : Any =RegressionDataset(length=snake_case__ )
lowerCamelCase_ : List[str] =RegressionDataset(length=snake_case__ )
lowerCamelCase_ : str =RegressionModelConfig(a=snake_case__ , b=snake_case__ )
lowerCamelCase_ : Optional[int] =RegressionPreTrainedModel(snake_case__ )
lowerCamelCase_ : Optional[int] =TrainingArguments(self.output_dir , disable_tqdm=snake_case__ , report_to=[] , **snake_case__ )
return Trainer(
snake_case__ , snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , callbacks=snake_case__ , )
def UpperCAmelCase__ ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ):
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
# Order doesn't matter
lowerCamelCase_ : Optional[Any] =sorted(snake_case__ , key=lambda snake_case__ : cb.__name__ if isinstance(snake_case__ , snake_case__ ) else cb.__class__.__name__ )
lowerCamelCase_ : Any =sorted(snake_case__ , key=lambda snake_case__ : cb.__name__ if isinstance(snake_case__ , snake_case__ ) else cb.__class__.__name__ )
for cba, cba in zip(snake_case__ , snake_case__ ):
if isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ ):
self.assertEqual(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , snake_case__ ) and not isinstance(snake_case__ , snake_case__ ):
self.assertEqual(snake_case__ , cba.__class__ )
elif not isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ ):
self.assertEqual(cba.__class__ , snake_case__ )
else:
self.assertEqual(snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : str ):
lowerCamelCase_ : int =["on_init_end", "on_train_begin"]
lowerCamelCase_ : Tuple =0
lowerCamelCase_ : int =len(trainer.get_eval_dataloader() )
lowerCamelCase_ : Union[str, Any] =["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(snake_case__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
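# Illustrative expansion (hypothetical run): for one epoch of two steps where
# no logging/save/eval thresholds are hit, the method above returns
# ["on_init_end", "on_train_begin", "on_epoch_begin", "on_step_begin",
#  "on_step_end", "on_step_begin", "on_step_end", "on_epoch_end", "on_log",
#  "on_train_end"].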
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Optional[Any] =self.get_trainer()
lowerCamelCase_ : str =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
# Callbacks passed at init are added to the default callbacks
lowerCamelCase_ : Any =self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
lowerCamelCase_ : List[Any] =self.get_trainer(disable_tqdm=snake_case__ )
lowerCamelCase_ : str =DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Tuple =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowerCamelCase_ : List[str] =self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(snake_case__ )
expected_callbacks.remove(snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
lowerCamelCase_ : str =self.get_trainer()
lowerCamelCase_ : Any =trainer.pop_callback(snake_case__ )
self.assertEqual(cb.__class__ , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
trainer.add_callback(snake_case__ )
expected_callbacks.insert(0 , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
# We can also add, pop, or remove by instance
lowerCamelCase_ : Tuple =self.get_trainer()
lowerCamelCase_ : Any =trainer.callback_handler.callbacks[0]
trainer.remove_callback(snake_case__ )
expected_callbacks.remove(snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
lowerCamelCase_ : List[str] =self.get_trainer()
lowerCamelCase_ : Union[str, Any] =trainer.callback_handler.callbacks[0]
lowerCamelCase_ : Optional[Any] =trainer.pop_callback(snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
trainer.add_callback(snake_case__ )
expected_callbacks.insert(0 , snake_case__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ )
def UpperCAmelCase__ ( self : List[Any] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=snake_case__ )
lowerCamelCase_ : Tuple =self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowerCamelCase_ : Optional[Any] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# Independent log/save/eval
lowerCamelCase_ : int =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowerCamelCase_ : List[Any] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
lowerCamelCase_ : Dict =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowerCamelCase_ : int =trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
lowerCamelCase_ : Optional[int] =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
lowerCamelCase_ : Any =trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
lowerCamelCase_ : Union[str, Any] =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
lowerCamelCase_ : Optional[int] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# A bit of everything
lowerCamelCase_ : Optional[Any] =self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
lowerCamelCase_ : Optional[Any] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
lowerCamelCase_ : Any =self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(snake_case__ ) in warn_mock.call_args[0][0]
| 144 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : int = logging.get_logger(__name__)
A__ : List[str] = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[Any] = "pix2struct_text_model"
_UpperCAmelCase :str = ["past_key_values"]
_UpperCAmelCase :str = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , snake_case__ : Any=5_0244 , snake_case__ : Optional[int]=768 , snake_case__ : Dict=64 , snake_case__ : List[str]=2048 , snake_case__ : Dict=12 , snake_case__ : Any=12 , snake_case__ : Dict=32 , snake_case__ : int=128 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=1E-6 , snake_case__ : Any=1.0 , snake_case__ : int="gelu_new" , snake_case__ : Optional[Any]=0 , snake_case__ : Any=False , snake_case__ : Any=0 , snake_case__ : Any=1 , snake_case__ : Optional[int]=False , snake_case__ : Tuple=True , **snake_case__ : Any , ):
lowerCamelCase_ : List[str] =vocab_size
lowerCamelCase_ : Tuple =hidden_size
lowerCamelCase_ : Optional[int] =d_kv
lowerCamelCase_ : List[Any] =d_ff
lowerCamelCase_ : Tuple =num_layers
lowerCamelCase_ : Optional[int] =num_heads
lowerCamelCase_ : Any =relative_attention_num_buckets
lowerCamelCase_ : Optional[int] =relative_attention_max_distance
lowerCamelCase_ : List[Any] =dropout_rate
lowerCamelCase_ : str =layer_norm_epsilon
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : str =use_cache
lowerCamelCase_ : int =eos_token_id
lowerCamelCase_ : Optional[Any] =decoder_start_token_id
# for backwards compatibility
lowerCamelCase_ : Optional[Any] =dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , snake_case__ : Union[str, os.PathLike] , **snake_case__ : str ):
cls._set_token_in_kwargs(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Any =cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase_ : List[Any] =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[Any] = "pix2struct_vision_model"
def __init__( self : Optional[int] , snake_case__ : Tuple=768 , snake_case__ : str=768 , snake_case__ : Union[str, Any]=2048 , snake_case__ : Tuple=64 , snake_case__ : List[Any]=12 , snake_case__ : Dict=12 , snake_case__ : int="gelu_new" , snake_case__ : str=1E-6 , snake_case__ : int=0.0 , snake_case__ : int=0.0 , snake_case__ : Dict=1E-10 , snake_case__ : Tuple=1.0 , snake_case__ : int=4096 , snake_case__ : Tuple=32 , snake_case__ : List[str]=128 , **snake_case__ : List[Any] , ):
super().__init__(**snake_case__ )
lowerCamelCase_ : int =hidden_size
lowerCamelCase_ : List[Any] =patch_embed_hidden_size
lowerCamelCase_ : Tuple =d_ff
lowerCamelCase_ : List[Any] =dropout_rate
lowerCamelCase_ : Dict =num_hidden_layers
lowerCamelCase_ : List[str] =num_attention_heads
lowerCamelCase_ : Optional[Any] =initializer_range
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : Any =attention_dropout
lowerCamelCase_ : List[str] =layer_norm_eps
lowerCamelCase_ : int =dense_act_fn
lowerCamelCase_ : Optional[Any] =seq_len
lowerCamelCase_ : Optional[int] =relative_attention_num_buckets
lowerCamelCase_ : Optional[int] =relative_attention_max_distance
lowerCamelCase_ : Dict =d_kv
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Dict ):
cls._set_token_in_kwargs(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Dict =cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase_ : List[Any] =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :str = "pix2struct"
_UpperCAmelCase :List[str] = True
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=1.0 , snake_case__ : List[Any]=0.02 , snake_case__ : List[Any]=False , snake_case__ : int=False , snake_case__ : Any=True , **snake_case__ : List[Any] , ):
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowerCamelCase_ : Dict ={}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowerCamelCase_ : int ={}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowerCamelCase_ : Any =PixaStructTextConfig(**snake_case__ )
lowerCamelCase_ : Optional[Any] =PixaStructVisionConfig(**snake_case__ )
lowerCamelCase_ : str =self.text_config.decoder_start_token_id
lowerCamelCase_ : Optional[int] =self.text_config.pad_token_id
lowerCamelCase_ : List[Any] =self.text_config.eos_token_id
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : Optional[Any] =initializer_range
lowerCamelCase_ : Any =self.initializer_range
lowerCamelCase_ : List[Any] =self.initializer_range
lowerCamelCase_ : List[str] =is_vqa
@classmethod
def UpperCAmelCase__ ( cls : List[str] , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Optional[int] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Tuple =copy.deepcopy(self.__dict__ )
lowerCamelCase_ : Dict =self.text_config.to_dict()
lowerCamelCase_ : Union[str, Any] =self.vision_config.to_dict()
lowerCamelCase_ : Dict =self.__class__.model_type
return output
| 144 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCAmelCase ( UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(_lowercase , '''_dynamo''' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
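# Illustrative behavior on torch >= 2.0: torch.compile(net) returns an
# OptimizedModule, so the check above is True for compiled modules and False
# for a plain nn.Module.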
def _lowerCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : List[str] = True ):
'''simple docstring'''
UpperCamelCase__ : int =(torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCamelCase__ : List[Any] =is_compiled_module(_lowercase )
if is_compiled:
UpperCamelCase__ : Union[str, Any] =model
UpperCamelCase__ : Any =model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowercase , _lowercase ):
UpperCamelCase__ : str =model.module
if not keep_fpaa_wrapper:
UpperCamelCase__ : Union[str, Any] =getattr(_lowercase , '''forward''' )
UpperCamelCase__ : List[Any] =model.__dict__.pop('''_original_forward''' , _lowercase )
if original_forward is not None:
while hasattr(_lowercase , '''__wrapped__''' ):
UpperCamelCase__ : str =forward.__wrapped__
if forward == original_forward:
break
UpperCamelCase__ : Dict =forward
if getattr(_lowercase , '''_converted_to_transformer_engine''' , _lowercase ):
convert_model(_lowercase , to_transformer_engine=_lowercase )
if is_compiled:
UpperCamelCase__ : Optional[Any] =model
UpperCamelCase__ : Tuple =compiled_model
return model
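# Usage sketch: given wrapped = torch.nn.DataParallel(net), this helper returns
# the underlying net, unwrapping DDP/DataParallel (and DeepSpeedEngine when
# available) and, unless keep_fpaa_wrapper is set, restoring the original
# forward saved before mixed-precision wrapping.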
def _lowerCAmelCase ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _lowerCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : int ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowercase , _lowercase )
elif PartialState().local_process_index == 0:
torch.save(_lowercase , _lowercase )
@contextmanager
def _lowerCAmelCase ( **UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
for key, value in kwargs.items():
UpperCamelCase__ : Dict =str(_lowercase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCAmelCase ( UpperCAmelCase : List[str] ):
'''simple docstring'''
if not hasattr(_lowercase , '''__qualname__''' ) and not hasattr(_lowercase , '''__name__''' ):
UpperCamelCase__ : int =getattr(_lowercase , '''__class__''' , _lowercase )
if hasattr(_lowercase , '''__qualname__''' ):
return obj.__qualname__
if hasattr(_lowercase , '''__name__''' ):
return obj.__name__
return str(_lowercase )
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ):
'''simple docstring'''
for key, value in source.items():
if isinstance(_lowercase , _lowercase ):
UpperCamelCase__ : Optional[int] =destination.setdefault(_lowercase , {} )
merge_dicts(_lowercase , _lowercase )
else:
UpperCamelCase__ : Tuple =value
return destination
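# Example: merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}}) returns the
# destination mutated to {"a": {"y": 2, "x": 1}}; nested dicts are merged
# recursively while scalar values overwrite.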
def _lowerCAmelCase ( UpperCAmelCase : Optional[Any] = None ):
'''simple docstring'''
if port is None:
UpperCamelCase__ : List[str] =29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0 | 359 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase : bool , UpperCAmelCase : bool ):
'''simple docstring'''
def run_func(UpperCAmelCase : List[str] ):
@wraps(UpperCAmelCase )
def run_in_eager_mode(*UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
return func(*UpperCAmelCase , **UpperCAmelCase )
@wraps(UpperCAmelCase )
@tf.function(experimental_compile=UpperCAmelCase )
def run_in_graph_mode(*UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Tuple ):
return func(*UpperCAmelCase , **UpperCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
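# Illustrative use of the factory above: run_with_tf_optimizations(False, True)
# yields a decorator that wraps a callable in
# tf.function(experimental_compile=True), i.e. graph mode with XLA, while
# (True, False) keeps plain eager execution.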
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
'''simple docstring'''
UpperCamelCase__ : Tuple =random.Random()
UpperCamelCase__ : List[str] =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(UpperCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = "TensorFlow"
@property
def _lowerCAmelCase ( self : int ):
return tf.__version__
def _lowerCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
# initialize GPU on separate process
UpperCamelCase__ : Optional[int] =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : str =self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_speed(_inference )
def _lowerCAmelCase ( self : str , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
UpperCamelCase__ : List[str] =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : int =self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_speed(_train )
def _lowerCAmelCase ( self : Any , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : Optional[Any] =self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_memory(_inference )
def _lowerCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ )
UpperCamelCase__ : Tuple =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : List[Any] =self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_memory(_train )
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
UpperCamelCase__ : Optional[Any] =self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
UpperCamelCase__ : Dict =(
hasattr(lowercase_ , '''architectures''' )
and isinstance(config.architectures , lowercase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase__ : Dict ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase__ : List[str] =__import__('''transformers''' , fromlist=[model_class] )
UpperCamelCase__ : Optional[int] =getattr(lowercase_ , lowercase_ )
UpperCamelCase__ : Optional[int] =model_cls(lowercase_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
UpperCamelCase__ : Any =TF_MODEL_MAPPING[config.__class__](lowercase_ )
# encoder-decoder has vocab size saved differently
UpperCamelCase__ : Optional[int] =config.vocab_size if hasattr(lowercase_ , '''vocab_size''' ) else config.encoder.vocab_size
UpperCamelCase__ : List[Any] =random_input_ids(lowercase_ , lowercase_ , lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowercase_ , decoder_input_ids=lowercase_ , training=lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowercase_ , training=lowercase_ )
UpperCamelCase__ : Dict =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowerCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
UpperCamelCase__ : List[str] =self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
UpperCamelCase__ : Optional[Any] =(
hasattr(lowercase_ , '''architectures''' )
and isinstance(config.architectures , lowercase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase__ : Tuple ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase__ : List[Any] =__import__('''transformers''' , fromlist=[model_class] )
UpperCamelCase__ : Dict =getattr(lowercase_ , lowercase_ )
UpperCamelCase__ : Tuple =model_cls(lowercase_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
UpperCamelCase__ : Optional[int] =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowercase_ )
# encoder-decoder has vocab size saved differently
UpperCamelCase__ : str =config.vocab_size if hasattr(lowercase_ , '''vocab_size''' ) else config.encoder.vocab_size
UpperCamelCase__ : Union[str, Any] =random_input_ids(lowercase_ , lowercase_ , lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCamelCase__ : Optional[Any] =model(lowercase_ , decoder_input_ids=lowercase_ , labels=lowercase_ , training=lowercase_ )[0]
UpperCamelCase__ : Dict =tf.gradients(lowercase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCamelCase__ : Dict =model(lowercase_ , labels=lowercase_ , training=lowercase_ )[0]
UpperCamelCase__ : List[str] =tf.gradients(lowercase_ , model.trainable_variables )
return gradients
UpperCamelCase__ : List[Any] =encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(lowercase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
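# each repeat executes the callable number=10 times, hence the division by
# 10.0 below to report a per-run latency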
UpperCamelCase__ : int =timeit.repeat(
lowercase_ , repeat=self.args.repeat , number=10 , )
return min(lowercase_ ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def _lowerCAmelCase ( self : Dict , lowercase_ : Callable[[], None] ):
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
UpperCamelCase__ : Tuple =start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
UpperCamelCase__ : List[str] ='''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
UpperCamelCase__ : Optional[Any] =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCamelCase__ : Dict =nvml.nvmlDeviceGetMemoryInfo(lowercase_ )
UpperCamelCase__ : str =meminfo.used
UpperCamelCase__ : int =Memory(lowercase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
UpperCamelCase__ : Union[str, Any] =None
else:
UpperCamelCase__ : Optional[int] =measure_peak_memory_cpu(lowercase_ )
UpperCamelCase__ : Dict =Memory(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCamelCase__ : Tuple =stop_memory_tracing(lowercase_ )
if memory is None:
UpperCamelCase__ : List[Any] =summary.total
else:
UpperCamelCase__ : List[Any] =None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 157 | 0 |
def _a ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ):
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each element of arr, a sum of zero (0) can be formed by taking no
# elements, hence True/1
for i in range(arr_len + 1 ):
__lowerCAmelCase = True
# a non-zero sum cannot be formed from the empty set, hence False
for i in range(1 , required_sum + 1 ):
__lowerCAmelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowerCAmelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__lowerCAmelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
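# Illustrative behavior (values chosen for this note, not from the original
# module): with arr = [2, 4, 6, 8], a required_sum of 5 is unreachable (every
# subset sum is even), while 14 is reachable via 2 + 4 + 8.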
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase_ ( _a : Optional[Any] , _a : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase_ : Any = model_name.find("""patch""" )
UpperCAmelCase_ : int = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
UpperCAmelCase_ : str = XCLIPVisionConfig(patch_size=_lowerCamelCase , num_frames=_lowerCamelCase )
if "large" in model_name:
UpperCAmelCase_ : Optional[Any] = 768
UpperCAmelCase_ : Any = 3072
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Tuple = 1024
UpperCAmelCase_ : Optional[int] = 4096
UpperCAmelCase_ : Dict = 16
UpperCAmelCase_ : List[Any] = 24
UpperCAmelCase_ : Dict = 768
UpperCAmelCase_ : Optional[int] = 3072
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Optional[int] = 336
UpperCAmelCase_ : Union[str, Any] = XCLIPConfig.from_text_vision_configs(_lowerCamelCase , _lowerCamelCase )
if "large" in model_name:
UpperCAmelCase_ : List[Any] = 768
return config
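# Illustrative derivation: "xclip-base-patch32" yields patch_size=32 with the
# base text/vision widths, while "xclip-large-patch14-16-frames" switches to
# the large text and vision widths and bumps the image size to 336.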
def lowerCamelCase_ ( _a : List[Any] ):
'''simple docstring'''
if name == "token_embedding.weight":
UpperCAmelCase_ : Union[str, Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
UpperCAmelCase_ : Dict = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
UpperCAmelCase_ : Tuple = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
UpperCAmelCase_ : List[str] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
UpperCAmelCase_ : str = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
UpperCAmelCase_ : List[str] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase_ : int = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
UpperCAmelCase_ : List[Any] = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase_ : str = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
UpperCAmelCase_ : Optional[int] = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
UpperCAmelCase_ : Optional[int] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
UpperCAmelCase_ : List[str] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
UpperCAmelCase_ : List[Any] = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
UpperCAmelCase_ : List[Any] = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
UpperCAmelCase_ : List[Any] = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
UpperCAmelCase_ : str = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase_ : str = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
UpperCAmelCase_ : str = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase_ : Tuple = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
UpperCAmelCase_ : Any = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
UpperCAmelCase_ : Union[str, Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
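# Illustrative rename chain through the branches above:
# "visual.transformer.resblocks.0.attn.out_proj.weight"
# first becomes "...self_attn.out_proj..." and then
# "vision_model.encoder.layers.0.self_attn.out_proj.weight".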
def lowerCamelCase_ ( _a : int , _a : int ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : int = orig_state_dict.pop(_lowerCamelCase )
if "attn.in_proj" in key:
UpperCAmelCase_ : List[str] = key.split(""".""" )
if key.startswith("""visual""" ):
UpperCAmelCase_ : Optional[int] = key_split[3]
UpperCAmelCase_ : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ : Optional[Any] = val[
:dim, :
]
UpperCAmelCase_ : str = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[str] = val[
-dim:, :
]
else:
UpperCAmelCase_ : Tuple = val[
:dim
]
UpperCAmelCase_ : Any = val[
dim : dim * 2
]
UpperCAmelCase_ : str = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ : List[Any] = val[
:dim, :
]
UpperCAmelCase_ : str = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[Any] = val[
-dim:, :
]
else:
UpperCAmelCase_ : str = val[:dim]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2
]
UpperCAmelCase_ : Dict = val[-dim:]
elif key.startswith("""mit""" ):
UpperCAmelCase_ : int = key_split[2]
UpperCAmelCase_ : Optional[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ : List[str] = val[:dim, :]
UpperCAmelCase_ : Dict = val[dim : dim * 2, :]
UpperCAmelCase_ : Optional[Any] = val[-dim:, :]
else:
UpperCAmelCase_ : Optional[Any] = val[:dim]
UpperCAmelCase_ : Optional[Any] = val[dim : dim * 2]
UpperCAmelCase_ : List[Any] = val[-dim:]
else:
UpperCAmelCase_ : Tuple = key_split[2]
UpperCAmelCase_ : Union[str, Any] = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ : Union[str, Any] = val[:dim, :]
UpperCAmelCase_ : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Optional[Any] = val[-dim:, :]
else:
UpperCAmelCase_ : List[str] = val[:dim]
UpperCAmelCase_ : Tuple = val[
dim : dim * 2
]
UpperCAmelCase_ : Any = val[-dim:]
else:
UpperCAmelCase_ : Optional[int] = rename_key(_lowerCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase_ : Dict = val.T
UpperCAmelCase_ : List[Any] = val
return orig_state_dict
def lowerCamelCase_ ( _a : Tuple ):
'''simple docstring'''
if num_frames == 8:
UpperCAmelCase_ : Optional[Any] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
UpperCAmelCase_ : Tuple = """eating_spaghetti.npy"""
elif num_frames == 32:
UpperCAmelCase_ : Union[str, Any] = """eating_spaghetti_32_frames.npy"""
UpperCAmelCase_ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=_lowerCamelCase , repo_type="""dataset""" , )
UpperCAmelCase_ : Dict = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
def lowerCamelCase_ ( _a : Dict , _a : Optional[Any]=None , _a : List[Any]=False ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
UpperCAmelCase_ : Tuple = model_to_url[model_name]
UpperCAmelCase_ : Dict = 8
if "16-frames" in model_name:
UpperCAmelCase_ : List[Any] = 16
elif "shot" in model_name:
UpperCAmelCase_ : Optional[int] = 32
UpperCAmelCase_ : List[Any] = get_xclip_config(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase_ : List[str] = XCLIPModel(_lowerCamelCase )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase_ : int = """pytorch_model.bin"""
gdown.cached_download(_lowerCamelCase , _lowerCamelCase , quiet=_lowerCamelCase )
UpperCAmelCase_ : List[str] = torch.load(_lowerCamelCase , map_location="""cpu""" )["""model"""]
else:
UpperCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCamelCase )["""model"""]
UpperCAmelCase_ : str = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = XCLIPModel(_lowerCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase_ : List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
UpperCAmelCase_ : str = VideoMAEImageProcessor(size=_lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
UpperCAmelCase_ : int = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
UpperCAmelCase_ : Tuple = XCLIPProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
UpperCAmelCase_ : Tuple = prepare_video(_lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=_lowerCamelCase , return_tensors="""pt""" , padding=_lowerCamelCase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**_lowerCamelCase )
# Verify outputs
UpperCAmelCase_ : Tuple = outputs.logits_per_video
UpperCAmelCase_ : int = logits_per_video.softmax(dim=1 )
print("""Probs:""" , _lowerCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ : str = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ : Dict = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ : str = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Tuple = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ : Dict = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ : int = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ : Optional[int] = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ : Tuple = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ : str = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ : Any = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ : Tuple = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ : Dict = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(_lowerCamelCase , organization="""nielsr""" )
processor.push_to_hub(_lowerCamelCase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(_lowerCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
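# A minimal command-line sketch for the conversion entry point above, using
# only the flags its argparse block defines (the script filename is an
# assumption, not stated in this snippet):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub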
| 363 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def lowerCamelCase_ ( _a : np.ndarray ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def lowerCamelCase_ ( _a : np.ndarray , _a : np.ndarray , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : Dict = np.nan
for i in range(_a ):
UpperCAmelCase_ : str = features[:, labels == i]
UpperCAmelCase_ : int = data.mean(1 )
# Centralize the data of class i
UpperCAmelCase_ : int = data - column_reshape(_a )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_a , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase_ : int = np.dot(_a , centered_data.T )
return covariance_sum / features.shape[1]
def lowerCamelCase_ ( _a : np.ndarray , _a : np.ndarray , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : int = features.mean(1 )
UpperCAmelCase_ : Union[str, Any] = np.nan
for i in range(_a ):
UpperCAmelCase_ : str = features[:, labels == i]
UpperCAmelCase_ : Dict = data.shape[1]
UpperCAmelCase_ : Any = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_a ) - column_reshape(_a ) , (column_reshape(_a ) - column_reshape(_a )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase_ : List[Any] = device_data * np.dot(
column_reshape(_a ) - column_reshape(_a ) , (column_reshape(_a ) - column_reshape(_a )).T , )
return covariance_sum / features.shape[1]
def lowerCamelCase_ ( _a : np.ndarray , _a : int ):
'''simple docstring'''
if features.any():
UpperCAmelCase_ : Dict = features.mean(1 )
# Center the dataset
UpperCAmelCase_ : Any = features - np.reshape(_a , (data_mean.size, 1) )
UpperCAmelCase_ : Tuple = np.dot(_a , centered_data.T ) / features.shape[1]
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = np.linalg.eigh(_a )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCAmelCase_ : List[str] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCAmelCase_ : Dict = np.dot(filtered_eigenvectors.T , _a )
logging.info("""Principal Component Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_a )
logging.error("""Dataset empty""" )
raise AssertionError
def lowerCamelCase_ ( _a : np.ndarray , _a : np.ndarray , _a : int , _a : int ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = eigh(
covariance_between_classes(_a , _a , _a ) , covariance_within_classes(_a , _a , _a ) , )
UpperCAmelCase_ : int = eigenvectors[:, ::-1][:, :dimensions]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = np.linalg.svd(_a )
UpperCAmelCase_ : Dict = svd_matrix[:, 0:dimensions]
UpperCAmelCase_ : Optional[Any] = np.dot(filtered_svd_matrix.T , _a )
logging.info("""Linear Discriminant Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=_a )
logging.error("""Dataset empty""" )
raise AssertionError
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCAmelCase_ : List[str] = np.array([0, 0, 0, 1, 1] )
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_a ) as error_info:
UpperCAmelCase_ : Dict = linear_discriminant_analysis(
_a , _a , _a , _a )
if isinstance(_a , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Tuple = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_a ) as error_info:
UpperCAmelCase_ : Union[str, Any] = principal_component_analysis(_a , _a )
if not np.allclose(_a , _a ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
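# A minimal sketch exercising the PCA routine above. The `lowerCamelCase_`
# definitions are name-obfuscated; the test functions reference them as
# `principal_component_analysis` and `linear_discriminant_analysis`, so those
# names are assumed here. Features are stored column-wise (one sample per
# column), matching the covariance helpers above.
#
#   features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
#   projected = principal_component_analysis(features, 2)
#   # projected has shape (2, 5): two principal components, five samples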
| 59 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
A: Any = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
A: List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def _snake_case ( UpperCamelCase : str ):
if "://" in dataset_path:
UpperCAmelCase : int = dataset_path.split("""://""" )[1]
return dataset_path
def _snake_case ( UpperCamelCase : fsspec.AbstractFileSystem ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _snake_case ( UpperCamelCase : fsspec.AbstractFileSystem , UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : Optional[int] = not is_remote_filesystem(UpperCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(UpperCamelCase ) , fs._strip_protocol(UpperCamelCase ) )
else:
fs.mv(UpperCamelCase , UpperCamelCase , recursive=UpperCamelCase )
def _snake_case ( ):
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : Optional[int] = threading.Lock()
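# A minimal usage sketch for the first `_snake_case` helper above (its body
# matches datasets' `extract_path_from_uri`; the name is an assumption since
# the definitions here are obfuscated):
#
#   extract_path_from_uri("s3://my-bucket/train.parquet")  # -> "my-bucket/train.parquet"
#   extract_path_from_uri("relative/local/path")           # -> "relative/local/path"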
| 109 |
"""simple docstring"""
A: Union[str, Any] = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def _snake_case ( UpperCamelCase : float ):
assert type(UpperCamelCase ) in (int, float) and decimal == int(UpperCamelCase )
UpperCAmelCase : str = int(UpperCamelCase )
UpperCAmelCase : Optional[int] = """"""
UpperCAmelCase : List[str] = False
if decimal < 0:
UpperCAmelCase : Any = True
decimal *= -1
while decimal > 0:
UpperCAmelCase , UpperCAmelCase : Dict = divmod(UpperCamelCase , 16 )
UpperCAmelCase : Union[str, Any] = values[remainder] + hexadecimal
UpperCAmelCase : int = """0x""" + hexadecimal
if negative:
UpperCAmelCase : Optional[int] = """-""" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
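# A short worked trace of the loop above for 255: divmod(255, 16) -> (15, 15)
# maps to "f", then divmod(15, 16) -> (0, 15) prepends another "f", giving
# "0xff". A negative input such as -26 is first flipped to 26 -> "0x1a", then
# the sign is re-applied, yielding "-0x1a".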
| 109 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__UpperCAmelCase : Optional[int] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def A__ ( ) -> List[str]:
__snake_case: Optional[int] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
__snake_case: int = bs[:]
__snake_case: Union[str, Any] = 0
for b in range(2**8):
if b not in bs:
bs.append(SCREAMING_SNAKE_CASE__)
cs.append(2**8 + n)
n += 1
__snake_case: Dict = [chr(SCREAMING_SNAKE_CASE__) for n in cs]
return dict(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
def A__ ( SCREAMING_SNAKE_CASE__) -> str:
__snake_case: List[str] = set()
__snake_case: List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
__snake_case: Optional[int] = char
return pairs
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , A : Dict , A : str , A : List[str]="replace" , A : Any="<s>" , A : List[Any]="</s>" , A : Union[str, Any]="</s>" , A : List[str]="<s>" , A : Optional[int]="<unk>" , A : Union[str, Any]="<pad>" , A : List[str]="<mask>" , A : Dict=False , **A : List[str] , ):
__snake_case: Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
__snake_case: int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
__snake_case: str = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
__snake_case: Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
__snake_case: Optional[int] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
__snake_case: List[str] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__snake_case: Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
errors=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , add_prefix_space=A , **A , )
with open(A , encoding="""utf-8""" ) as vocab_handle:
__snake_case: str = json.load(A )
__snake_case: int = {v: k for k, v in self.encoder.items()}
__snake_case: Optional[Any] = errors # how to handle errors in decoding
__snake_case: str = bytes_to_unicode()
__snake_case: Any = {v: k for k, v in self.byte_encoder.items()}
with open(A , encoding="""utf-8""" ) as merges_handle:
__snake_case: Dict = merges_handle.read().split("""\n""" )[1:-1]
__snake_case: Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
__snake_case: List[str] = dict(zip(A , range(len(A ) ) ) )
__snake_case: str = {}
__snake_case: int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__snake_case: Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def UpperCAmelCase__ ( self : List[str] ):
return len(self.encoder )
def UpperCAmelCase__ ( self : List[str] ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self : Tuple , A : Union[str, Any] ):
if token in self.cache:
return self.cache[token]
__snake_case: Any = tuple(A )
__snake_case: Optional[int] = get_pairs(A )
if not pairs:
return token
while True:
__snake_case: List[str] = min(A , key=lambda A : self.bpe_ranks.get(A , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case: Any = bigram
__snake_case: Optional[int] = []
__snake_case: Union[str, Any] = 0
while i < len(A ):
try:
__snake_case: Optional[int] = word.index(A , A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case: Any = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case: List[Any] = tuple(A )
__snake_case: Dict = new_word
if len(A ) == 1:
break
else:
__snake_case: List[str] = get_pairs(A )
__snake_case: Optional[int] = """ """.join(A )
__snake_case: List[Any] = word
return word
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[Any] ):
__snake_case: str = []
for token in re.findall(self.pat , A ):
__snake_case: int = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(""" """ ) )
return bpe_tokens
def UpperCAmelCase__ ( self : Union[str, Any] , A : int ):
return self.encoder.get(A , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self : str , A : Any ):
return self.decoder.get(A )
def UpperCAmelCase__ ( self : Optional[int] , A : str ):
__snake_case: Optional[Any] = """""".join(A )
__snake_case: Any = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCAmelCase__ ( self : Union[str, Any] , A : str , A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case: Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case: Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A ) + """\n""" )
__snake_case: Any = 0
with open(A , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
__snake_case: Union[str, Any] = token_index
writer.write(""" """.join(A ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[int] , A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case: Dict = [self.cls_token_id]
__snake_case: Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : List[str] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
__snake_case: Any = [self.sep_token_id]
__snake_case: Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : int , A : str , A : str=False , **A : Optional[Any] ):
__snake_case: List[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
__snake_case: Any = """ """ + text
return (text, kwargs)
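# A small worked illustration of the `bpe` loop above, assuming a merge table
# that ranks ("l", "o") before ("lo", "w"): "low" starts as ("l", "o", "w")
# with pairs {("l","o"), ("o","w")}; the best-ranked pair ("l","o") is merged
# to give ("lo", "w"); the remaining pair ("lo","w") is merged next, leaving
# the single symbol ("low",), at which point the loop exits and the result is
# cached. Each merge rescans the word once, so a call costs roughly
# O(len(word) * number_of_merges).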
| 368 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
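# Every test case above follows one template from UNetBlockTesterMixin: pick a
# block class, declare whether it sits on the "down", "mid", or "up" path,
# optionally adjust the dummy inputs (encoder hidden states, skip samples,
# temb), and compare a fixed nine-value output slice against the hardcoded
# expectations via `super().test_output(...)`.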
| 293 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class A__(a_ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> None:
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
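# A minimal sketch of the migration the deprecation warning asks for
# (the checkpoint name is an assumption used for illustration):
#
#   from transformers import DonutImageProcessor
#   image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")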
| 248 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__snake_case : Dict = """<<<<<<< This should probably be modified because it mentions: """
__snake_case : Any = """=======
>>>>>>>
"""
__snake_case : Any = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
__snake_case : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def _UpperCAmelCase ( a__):
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory)
class A__(a_ ):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( _lowercase ) -> Dict:
a_ : Optional[Any] = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=_lowercase , required=_lowercase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=_lowercase , required=_lowercase , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=_lowercase )
def __init__( self , _lowercase , _lowercase , *_lowercase ) -> str:
a_ : List[Any] = get_logger("""datasets-cli/converting""" )
a_ : Optional[Any] = tfds_path
a_ : List[Any] = datasets_directory
def UpperCamelCase__ ( self ) -> Dict:
if os.path.isdir(self._tfds_path ):
a_ : List[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a_ : Dict = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
a_ : List[Any] = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
a_ : Dict = []
a_ : Tuple = []
a_ : str = {}
if os.path.isdir(self._tfds_path ):
a_ : str = os.listdir(_lowercase )
else:
a_ : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
a_ : List[str] = os.path.join(_lowercase , _lowercase )
a_ : Dict = os.path.join(_lowercase , _lowercase )
if not os.path.isfile(_lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(_lowercase , encoding="""utf-8""" ) as f:
a_ : Any = f.readlines()
a_ : Any = []
a_ : str = False
a_ : List[str] = False
a_ : List[Any] = []
for line in lines:
a_ : Union[str, Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a_ : List[Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
a_ : Optional[int] = """"""
continue
elif "from absl import logging" in out_line:
a_ : List[str] = """from datasets import logging\n"""
elif "getLogger" in out_line:
a_ : List[str] = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a_ : Dict = True
a_ : Optional[Any] = list(filter(lambda _lowercase : e in out_line , _lowercase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowercase ) + """\n""" )
out_lines.append(_lowercase )
out_lines.append(_lowercase )
continue
else:
for pattern, replacement in TO_CONVERT:
a_ : List[str] = re.sub(_lowercase , _lowercase , _lowercase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
a_ : Tuple = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , _lowercase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
a_ : Optional[int] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a_ : Optional[Any] = True
out_lines.append(_lowercase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a_ : List[str] = f_name.replace(""".py""" , """""" )
a_ : Optional[int] = os.path.join(_lowercase , _lowercase )
a_ : Dict = os.path.join(_lowercase , _lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowercase )
if needs_manual_update:
with_manual_update.append(_lowercase )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.writelines(_lowercase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
a_ : Optional[int] = os.path.basename(_lowercase )
a_ : List[Any] = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(_lowercase , _lowercase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
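# A minimal standalone sketch of how the (pattern, replacement) table above
# rewrites one line; the obfuscated `__snake_case` assignment is referenced
# later as TO_CONVERT, so that name is assumed:
#
#   import re
#   line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
#   for pattern, replacement in TO_CONVERT:
#       line = re.sub(pattern, replacement, line)
#   # -> "features=datasets.Features({'text': datasets.Value('string')})"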
| 248 | 1 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A_ ( _a ):
lowerCAmelCase__ = 4_2
lowerCAmelCase__ = None
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowerCamelCase : str = []
for i in range(_lowerCamelCase ):
_lowerCamelCase : Any = i / num_diffusion_timesteps
_lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
class A_ ( _a , _a ):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,):
'''simple docstring'''
_lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
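# The coefficients computed in `step` above follow Eq. (7) of the DDPM paper
# (https://arxiv.org/pdf/2006.11239.pdf). In LaTeX:
#
#   \tilde{\mu}_t(x_t, x_0)
#     = \frac{\sqrt{\bar\alpha_{t-1}}\,\beta_t}{1-\bar\alpha_t}\,x_0
#     + \frac{\sqrt{\alpha_t}\,(1-\bar\alpha_{t-1})}{1-\bar\alpha_t}\,x_t,
#   \qquad
#   \tilde\beta_t = \frac{1-\bar\alpha_{t-1}}{1-\bar\alpha_t}\,\beta_t
#
# which correspond to `pred_original_sample_coeff`, `current_sample_coeff`,
# and the `variance` expression in `_get_variance`.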
| 361 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if point:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for item in point:
if not isinstance(_lowerCamelCase , (int, float) ):
_lowerCamelCase : Dict = (
"Expected a list of numbers as input, found "
F"""{type(_lowerCamelCase ).__name__}"""
)
raise TypeError(_lowerCamelCase )
else:
_lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}"""
raise TypeError(_lowerCamelCase )
else:
raise ValueError("Missing an input" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) )
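# A short worked example for the two distance functions above (both obfuscated
# as `lowerCamelCase_`; the first is the loop form, the second a one-liner):
# for point_a = [1, 1] and point_b = [4, 5], the result is
# |1 - 4| + |1 - 5| = 3 + 4 = 7.0 from either implementation.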
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "openai/whisper-base"
_lowerCamelCase = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
_lowerCamelCase = "transcriber"
_lowerCamelCase = WhisperProcessor
_lowerCamelCase = WhisperForConditionalGeneration
_lowerCamelCase = ["audio"]
_lowerCamelCase = ["text"]
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.pre_processor(UpperCamelCase , return_tensors="pt" ).input_features
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.model.generate(inputs=UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.pre_processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )[0]
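# A minimal usage sketch for the tool above, assuming the standard
# transformers agents calling convention (the obfuscated `snake_case` class
# corresponds to a speech-to-text PipelineTool; the instance is callable and
# chains encode -> forward -> decode):
#
#   tool = SpeechToTextTool()           # class name assumed
#   text = tool("path/to/audio.flac")   # returns the transcription string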
| 55 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''char'''
lowerCamelCase__ = '''bpe'''
lowerCamelCase__ = '''wp'''
A : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''image_processor''', '''char_tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = '''MgpstrTokenizer'''
def __init__( self : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : int=None , **__magic_name__ : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
SCREAMING_SNAKE_CASE_ = tokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("gpt2" )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : Dict , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Dict=None , **__magic_name__ : Tuple ) -> int:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.char_tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ = encodings["input_ids"]
return inputs
def __A ( self : Tuple , __magic_name__ : int ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = sequences
SCREAMING_SNAKE_CASE_ = char_preds.size(0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "char" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "bpe" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "wp" )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE_ = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE_ = scores.index(max(__magic_name__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = final_strs
SCREAMING_SNAKE_CASE_ = final_scores
SCREAMING_SNAKE_CASE_ = char_strs
SCREAMING_SNAKE_CASE_ = bpe_strs
SCREAMING_SNAKE_CASE_ = wp_strs
return out
def __A ( self : int , __magic_name__ : List[Any] , __magic_name__ : str ) -> Any:
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE_ = self.char_decode
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = "[s]"
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE_ = self.bpe_decode
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = "#"
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE_ = self.wp_decode
SCREAMING_SNAKE_CASE_ = 102
SCREAMING_SNAKE_CASE_ = "[SEP]"
else:
raise ValueError(F'''Format {format} is not supported.''' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], []
SCREAMING_SNAKE_CASE_ = pred_logits.size(0 )
SCREAMING_SNAKE_CASE_ = pred_logits.size(1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pred_logits.topk(1 , dim=-1 , largest=__magic_name__ , sorted=__magic_name__ )
SCREAMING_SNAKE_CASE_ = preds_index.view(-1 , __magic_name__ )[:, 1:]
SCREAMING_SNAKE_CASE_ = decoder(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.nn.functional.softmax(__magic_name__ , dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE_ = preds_max_prob[:, 1:]
for index in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = preds_str[index].find(__magic_name__ )
SCREAMING_SNAKE_CASE_ = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE_ = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE_ = pred_index.index(__magic_name__ ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE_ = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__magic_name__ )
conf_scores.append(__magic_name__ )
return dec_strs, conf_scores
def __A ( self : Any , __magic_name__ : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__magic_name__ )]
return decode_strs
def __A ( self : Any , __magic_name__ : Union[str, Any] ) -> Tuple:
return self.bpe_tokenizer.batch_decode(__magic_name__ )
def __A ( self : str , __magic_name__ : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__magic_name__ )]
return decode_strs
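# The decode path above fuses three heads: for every sample it decodes the
# character, BPE, and wordpiece logits independently, scores each candidate
# string by the cumulative product of per-token max probabilities up to the
# head-specific end token ("[s]", "#", or "[SEP]"), and keeps whichever of
# the three candidates scored highest.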
| 118 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_snake_case = logging.getLogger(__name__)
_snake_case = 50 # max width of layer names
_snake_case = 70 # max width of quantizer names
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=_lowerCamelCase , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=_lowerCamelCase , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=_lowerCamelCase , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=_lowerCamelCase , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=_lowerCamelCase , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=_lowerCamelCase , type=_lowerCamelCase , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=_lowerCamelCase , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def A ( _lowerCamelCase ):
'''simple docstring'''
if args.calibrator == "max":
_lowerCAmelCase : Any = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
_lowerCAmelCase : Tuple = "histogram"
elif args.calibrator == "mse":
_lowerCAmelCase : str = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
_lowerCAmelCase : int = QuantDescriptor(num_bits=args.aprec , calib_method=_lowerCamelCase )
_lowerCAmelCase : List[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
'''simple docstring'''
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_lowerCamelCase , ["embeddings"] , which="weight" , _disabled=_lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(_lowerCamelCase , [""] , _disabled=_lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_lowerCamelCase , args.quant_disable_keyword , _disabled=_lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=_lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=_lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(_lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(_lowerCamelCase , _lowerCamelCase )
if args.clip_gelu:
clip_gelu(_lowerCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def fusea(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for mod in [qq, qk, qv]:
if not hasattr(_lowerCamelCase , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
_lowerCAmelCase : Optional[int] = qq._amax.detach().item()
_lowerCAmelCase : int = qk._amax.detach().item()
_lowerCAmelCase : Optional[Any] = qv._amax.detach().item()
_lowerCAmelCase : Tuple = max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
qq._amax.fill_(_lowerCamelCase )
qk._amax.fill_(_lowerCamelCase )
qv._amax.fill_(_lowerCamelCase )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
_lowerCAmelCase : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_lowerCamelCase )
_lowerCAmelCase : str = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def A ( _lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
_lowerCAmelCase : Dict = mod.weight.shape[0]
_lowerCAmelCase : Any = mod._weight_quantizer._amax.detach()
_lowerCAmelCase : List[Any] = torch.ones(_lowerCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def A ( _lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_lowerCAmelCase : Any = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_lowerCAmelCase : Union[str, Any] = set(range(len(mod.weight.size() ) ) ) - axis_set
_lowerCAmelCase : List[Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowerCamelCase , keepdims=_lowerCamelCase ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
_lowerCAmelCase : Tuple = amax
def A ( _lowerCamelCase , _lowerCamelCase=25 , _lowerCamelCase=180 , _lowerCamelCase=None ):
'''simple docstring'''
if ignore is None:
_lowerCAmelCase : Tuple = []
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = [ignore]
_lowerCAmelCase : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(_lowerCamelCase , "weight" ):
continue
_lowerCAmelCase : Any = max(_lowerCamelCase , len(_lowerCamelCase ) )
for name, mod in model.named_modules():
_lowerCAmelCase : Tuple = getattr(_lowerCamelCase , "_input_quantizer" , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = getattr(_lowerCamelCase , "_weight_quantizer" , _lowerCamelCase )
if not hasattr(_lowerCamelCase , "weight" ):
continue
if type(_lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(_lowerCamelCase ) is str and s in name]:
continue
_lowerCAmelCase : Any = F"Act:{input_q.extra_repr()}"
_lowerCAmelCase : List[Any] = F"Wgt:{weight_q.extra_repr()}"
_lowerCAmelCase : Any = F"{name:{name_width}} {act_str} {wgt_str}"
if len(_lowerCamelCase ) <= line_width:
logger.info(_lowerCamelCase )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
for name, mod in model.named_modules():
if isinstance(_lowerCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(_lowerCamelCase , _lowerCamelCase )
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
logger.warning(F"{name} has no {quantizer}" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="both" , **_lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , "_input_quantizer" , _lowerCamelCase , _lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , "_weight_quantizer" , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , "_input_quantizer" ) or hasattr(_lowerCamelCase , "_weight_quantizer" ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
set_quantizers(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
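The fuse-qkv step above gives the query, key and value quantizers a shared dynamic range by taking the maximum of their calibrated amax values. A dependency-free sketch of just that fusion, with a tiny stand-in class assumed in place of pytorch_quantization's TensorQuantizer:

```python
import torch

class Quantizer:
    """Stand-in for pytorch_quantization's TensorQuantizer (assumption)."""

    def __init__(self, amax: float) -> None:
        self._amax = torch.tensor(amax)

def fuse_amax(*quantizers: Quantizer) -> None:
    # share one dynamic range: the max of the calibrated amax values
    fused = max(q._amax.item() for q in quantizers)
    for q in quantizers:
        q._amax.fill_(fused)

qq, qk, qv = Quantizer(1.3), Quantizer(2.1), Quantizer(0.8)
fuse_amax(qq, qk, qv)
print(qq._amax.item(), qk._amax.item(), qv._amax.item())  # all ~2.1
```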
| 360 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_lowerCAmelCase : Tuple = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Any = model.state_dict()
def to_tf_var_name(_lowerCamelCase ):
for patt, repl in iter(_lowerCamelCase ):
_lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
return F"bert/{name}"
def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowerCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
_lowerCAmelCase : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase : Tuple = torch_tensor.T
_lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
_lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
_lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
_lowerCAmelCase : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
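Stripped of the TensorFlow session plumbing, the conversion above is a per-tensor rename plus a transpose for 2-D dense kernels. A NumPy-only sketch with an abridged pattern list (the sample parameter name is illustrative):

```python
import numpy as np

# Abridged pattern list; order matters ("." must become "/" before the
# LayerNorm renames can match).
PATTERNS = (
    ("layer.", "layer_"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("weight", "kernel"),
)

def to_tf_var_name(name: str) -> str:
    for patt, repl in PATTERNS:
        name = name.replace(patt, repl)
    return f"bert/{name}"

state_dict = {"encoder.layer.0.output.dense.weight": np.ones((4, 3))}
for name, tensor in state_dict.items():
    if "dense.weight" in name:
        tensor = tensor.T  # TF stores dense kernels transposed
    print(to_tf_var_name(name), tensor.shape)
    # bert/encoder/layer_0/output/dense/kernel (3, 4)
```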
| 300 | 0 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = WavaVecaForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : Tuple = downstream_dict["projector.weight"]
_lowerCAmelCase : Optional[int] = downstream_dict["projector.bias"]
_lowerCAmelCase : List[Any] = downstream_dict["model.post_net.linear.weight"]
_lowerCAmelCase : Tuple = downstream_dict["model.post_net.linear.bias"]
return model
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : List[Any] = downstream_dict["model.linear.weight"]
_lowerCAmelCase : Dict = downstream_dict["model.linear.bias"]
return model
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = WavaVecaForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : List[str] = downstream_dict["connector.weight"]
_lowerCAmelCase : List[Any] = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_lowerCAmelCase : List[Any] = downstream_dict[
F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
_lowerCAmelCase : Optional[Any] = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
_lowerCAmelCase : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_lowerCAmelCase : Tuple = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_lowerCAmelCase : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_lowerCAmelCase : List[str] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_lowerCAmelCase : str = downstream_dict["objective.W"]
return model
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : str = checkpoint["Downstream"]
_lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase )
_lowerCAmelCase : str = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_lowerCAmelCase : List[Any] = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_lowerCAmelCase : str = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith("ForXVector" ):
_lowerCAmelCase : Optional[Any] = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
_lowerCAmelCase : Dict = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
_snake_case = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
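All three converters follow one pattern: read tensors out of the checkpoint's "Downstream" state dict and copy them into the matching attributes of the target head. A toy version with a single nn.Linear head (the key names mirror the ones above, but the shapes are assumptions):

```python
import torch
import torch.nn as nn

downstream = {  # toy stand-in for checkpoint["Downstream"]
    "projector.weight": torch.randn(8, 16),
    "projector.bias": torch.randn(8),
}

head = nn.Linear(16, 8)
with torch.no_grad():
    head.weight.copy_(downstream["projector.weight"])
    head.bias.copy_(downstream["projector.bias"])

assert torch.equal(head.weight, downstream["projector.weight"])
```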
| 36 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=24, __a=2, __a=6, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=None, __a=1000, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : int = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : List[str] = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : int = max_position_embeddings
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : List[Any] = num_labels
_lowerCAmelCase : Tuple = scope
_lowerCAmelCase : str = range_bbox
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Dict = bbox[i, j, 3]
_lowerCAmelCase : int = bbox[i, j, 1]
_lowerCAmelCase : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : str = bbox[i, j, 2]
_lowerCAmelCase : List[Any] = bbox[i, j, 0]
_lowerCAmelCase : str = t
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : Optional[int] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = LiltModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
_lowerCAmelCase : str = model(__a, bbox=__a, token_type_ids=__a)
_lowerCAmelCase : List[Any] = model(__a, bbox=__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Optional[Any] = LiltForTokenClassification(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(
__a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = LiltForQuestionAnswering(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Tuple = model(
__a, bbox=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase : List[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , a , unittest.TestCase):
lowerCamelCase__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
return True
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = LiltModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : Any = type
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = LiltModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__a)
_lowerCAmelCase : Any = torch.tensor([[1, 2]], device=__a)
_lowerCAmelCase : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(input_ids=__a, bbox=__a)
_lowerCAmelCase : Optional[int] = torch.Size([1, 2, 768])
_lowerCAmelCase : List[str] = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]], device=__a, )
        self.assertEqual(outputs.last_hidden_state.shape, __a)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], __a, atol=1E-3))
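The nested loops in `prepare_config_and_inputs` enforce x0 <= x1 and y0 <= y1 on the random boxes by swapping out-of-order coordinates. The same fixup can be written as a single vectorized pass; a sketch on a random (batch, seq, 4) tensor:

```python
import torch

bbox = torch.randint(0, 1000, (2, 7, 4))
x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
bbox = torch.stack([x0, y0, x1, y1], dim=-1)
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()
```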
| 36 | 1 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowerCAmelCase ( nn.Module ):
def __init__( self :Optional[int] ):
'''simple docstring'''
super().__init__()
lowercase__ = nn.Linear(3 , 4 )
lowercase__ = nn.BatchNormad(4 )
lowercase__ = nn.Linear(4 , 5 )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :List[str] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) )
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowercase , model.state_dict() )
lowercase__ = os.path.join(_lowercase , "index.json" )
self.assertTrue(os.path.isfile(_lowercase ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
lowercase__ = os.path.join(_lowercase , f'''{key}.dat''' )
self.assertTrue(os.path.isfile(_lowercase ) )
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
lowercase__ = torch.randn(2 , 3 , dtype=_lowercase )
with TemporaryDirectory() as tmp_dir:
lowercase__ = offload_weight(_lowercase , "weight" , _lowercase , {} )
lowercase__ = os.path.join(_lowercase , "weight.dat" )
self.assertTrue(os.path.isfile(_lowercase ) )
self.assertDictEqual(_lowercase , {"weight": {"shape": [2, 3], "dtype": str(_lowercase ).split("." )[1]}} )
lowercase__ = load_offloaded_weight(_lowercase , index["weight"] )
self.assertTrue(torch.equal(_lowercase , _lowercase ) )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = ModelForTest()
lowercase__ = model.state_dict()
lowercase__ = {k: v for k, v in state_dict.items() if "linear2" not in k}
lowercase__ = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowercase , _lowercase )
lowercase__ = OffloadedWeightsLoader(state_dict=_lowercase , save_folder=_lowercase )
# Every key is there with the right value
self.assertEqual(sorted(_lowercase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_lowercase , weight_map[key] ) )
lowercase__ = {k: v for k, v in state_dict.items() if "weight" in k}
lowercase__ = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowercase , _lowercase )
lowercase__ = OffloadedWeightsLoader(state_dict=_lowercase , save_folder=_lowercase )
# Every key is there with the right value
self.assertEqual(sorted(_lowercase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_lowercase , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowercase , _lowercase )
# Duplicates are removed
lowercase__ = OffloadedWeightsLoader(state_dict=_lowercase , save_folder=_lowercase )
# Every key is there with the right value
self.assertEqual(sorted(_lowercase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_lowercase , weight_map[key] ) )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = {"a.1": 0, "a.10": 1, "a.2": 2}
lowercase__ = extract_submodules_state_dict(_lowercase , ["a.1", "a.2"] )
self.assertDictEqual(_lowercase , {"a.1": 0, "a.2": 2} )
lowercase__ = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
lowercase__ = extract_submodules_state_dict(_lowercase , ["a.1", "a.2"] )
self.assertDictEqual(_lowercase , {"a.1.a": 0, "a.2.a": 2} )
| 358 |
def _A ( __magic_name__ ):
lowercase__ = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _A ( __magic_name__ ):
lowercase__ = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
lowercase__ = remove_duplicates(key.upper() )
lowercase__ = len(__magic_name__ )
# First fill cipher with key characters
lowercase__ = {alphabet[i]: char for i, char in enumerate(__magic_name__ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(__magic_name__ ) , 26 ):
lowercase__ = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
lowercase__ = alphabet[i - offset]
lowercase__ = char
return cipher_alphabet
def _A ( __magic_name__ , __magic_name__ ):
return "".join(cipher_map.get(__magic_name__ , __magic_name__ ) for ch in message.upper() )
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(__magic_name__ , __magic_name__ ) for ch in message.upper() )
def _A ( ):
lowercase__ = input("Enter message to encode or decode: " ).strip()
lowercase__ = input("Enter keyword: " ).strip()
lowercase__ = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
lowercase__ = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
lowercase__ = create_cipher_map(__magic_name__ )
print(func(__magic_name__ , __magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
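For reference, the intended behavior of the keyword cipher: the substitution alphabet is the deduplicated key followed by the remaining letters of the alphabet in order. A compact, runnable reconstruction (the key and message are examples):

```python
from string import ascii_uppercase

def cipher_map(key: str) -> dict:
    seen = []
    for ch in key.upper():
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    rest = [c for c in ascii_uppercase if c not in seen]
    return dict(zip(ascii_uppercase, seen + rest))

mapping = cipher_map("severity")
enciphered = "".join(mapping.get(c, c) for c in "HELLO WORLD")
reverse = {v: k for k, v in mapping.items()}
print(enciphered, "".join(reverse.get(c, c) for c in enciphered))  # round-trips
```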
| 201 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__snake_case : Tuple = logging.get_logger(__name__)
class A__(lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
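This shim is the usual deprecation pattern: subclass the replacement, emit a FutureWarning in `__init__`, and delegate everything else. A generic sketch with hypothetical class names:

```python
import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)

OldProcessor()  # warns once, then behaves exactly like NewProcessor
```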
| 248 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : int ):
snake_case_ : Dict = params
snake_case_ : Union[str, Any] = np.array(lowercase_ )
snake_case_ : str = np.array([len(lowercase_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Dict , lowercase_ : Union[str, Any] ):
return (self.token_ids[index], self.lengths[index])
def __len__( self : List[Any] ):
return len(self.lengths )
def _snake_case ( self : Tuple ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _snake_case ( self : Tuple ):
snake_case_ : str = self.params.max_model_input_size
snake_case_ : Dict = self.lengths > max_len
logger.info(f"Splitting {sum(lowercase_ )} too long sequences." )
def divide_chunks(lowercase_ : Tuple , lowercase_ : Optional[Any] ):
return [l[i : i + n] for i in range(0 , len(lowercase_ ) , lowercase_ )]
snake_case_ : Tuple = []
snake_case_ : Any = []
if self.params.mlm:
            snake_case_, snake_case_ = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
            snake_case_, snake_case_ = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
snake_case_ : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
snake_case_ : Dict = np.insert(lowercase_ , 0 , lowercase_ )
if sub_s[-1] != sep_id:
snake_case_ : Tuple = np.insert(lowercase_ , len(lowercase_ ) , lowercase_ )
assert len(lowercase_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowercase_ )
new_tok_ids.extend(lowercase_ )
new_lengths.extend([len(lowercase_ ) for l in sub_seqs] )
snake_case_ : List[str] = np.array(lowercase_ )
snake_case_ : Optional[Any] = np.array(lowercase_ )
def _snake_case ( self : Optional[int] ):
snake_case_ : List[Any] = len(self )
snake_case_ : List[str] = self.lengths > 11
snake_case_ : Dict = self.token_ids[indices]
snake_case_ : Dict = self.lengths[indices]
snake_case_ : str = len(self )
logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def _snake_case ( self : Tuple ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case_ : str = self.params.special_tok_ids['''unk_token''']
snake_case_ : str = len(self )
snake_case_ : int = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
snake_case_ : str = (unk_occs / self.lengths) < 0.5
snake_case_ : Optional[Any] = self.token_ids[indices]
snake_case_ : Optional[int] = self.lengths[indices]
snake_case_ : Dict = len(self )
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def _snake_case ( self : Dict ):
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _snake_case ( self : List[str] , lowercase_ : Dict ):
snake_case_ : Optional[int] = [t[0] for t in batch]
snake_case_ : str = [t[1] for t in batch]
assert len(lowercase_ ) == len(lowercase_ )
# Max for paddings
snake_case_ : str = max(lowercase_ )
# Pad token ids
if self.params.mlm:
snake_case_ : Tuple = self.params.special_tok_ids['''pad_token''']
else:
snake_case_ : Dict = self.params.special_tok_ids['''unk_token''']
snake_case_ : Any = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids]
assert len(tk_ ) == len(lowercase_ )
assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ )
snake_case_ : str = torch.tensor(tk_ ) # (bs, max_seq_len_)
snake_case_ : Optional[int] = torch.tensor(lowercase_ ) # (bs)
return tk_t, lg_t
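The collate step above pads every sequence to the longest one in its batch and returns the padded ids alongside the original lengths. A minimal sketch of that padding (the pad id and the toy batch are assumptions):

```python
import torch

def pad_batch(token_ids, pad_idx):
    lengths = [len(t) for t in token_ids]
    max_len = max(lengths)
    padded = [t + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)

tokens, lengths = pad_batch([[5, 6, 7], [5, 6], [5]], pad_idx=0)
print(tokens.shape, lengths.tolist())  # torch.Size([3, 3]) [3, 2, 1]
```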
| 264 | 0 |
from __future__ import annotations
def UpperCamelCase__( UpperCamelCase__ : list[float] , UpperCamelCase__ : Optional[Any] )->List[Any]:
print(f"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(UpperCamelCase__ ):
print(f"{i}\t\t{d}" )
def UpperCamelCase__( UpperCamelCase__ : list[dict[str, int]] , UpperCamelCase__ : list[float] , UpperCamelCase__ : int )->Dict:
for j in range(UpperCamelCase__ ):
A__ , A__ , A__ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def UpperCamelCase__( UpperCamelCase__ : list[dict[str, int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int )->list[float]:
A__ = [float('''inf''' )] * vertex_count
A__ = 0.0
for _ in range(vertex_count - 1 ):
for j in range(UpperCamelCase__ ):
A__ , A__ , A__ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
A__ = distance[u] + w
A__ = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
a__: int = int(input('Enter number of vertices: ').strip())
a__: List[Any] = int(input('Enter number of edges: ').strip())
a__: list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
    a__ , a__ , a__ = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
a__: Tuple = {'src': src, 'dst': dest, 'weight': weight}
a__: Tuple = int(input('\nEnter shortest path source:').strip())
a__: List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
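A non-interactive check of the relaxation loop above, reimplemented in a few lines so it can run without stdin; the three-edge graph is illustrative:

```python
def bellman_ford_demo(edges, vertex_count, src):
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for e in edges:
            u, v, w = e["src"], e["dst"], e["weight"]
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    return distance

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 1, "dst": 2, "weight": -2},
    {"src": 0, "dst": 2, "weight": 5},
]
print(bellman_ford_demo(edges, 3, 0))  # [0.0, 4.0, 2.0]
```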
| 39 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: List[Any] = logging.get_logger(__name__)
a__: Optional[Any] = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''unispeech'''
def __init__( self,__lowerCamelCase=32,__lowerCamelCase=768,__lowerCamelCase=12,__lowerCamelCase=12,__lowerCamelCase=3072,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.0,__lowerCamelCase=0.0,__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.02,__lowerCamelCase=1E-5,__lowerCamelCase="group",__lowerCamelCase="gelu",__lowerCamelCase=(512, 512, 512, 512, 512, 512, 512),__lowerCamelCase=(5, 2, 2, 2, 2, 2, 2),__lowerCamelCase=(10, 3, 3, 3, 3, 2, 2),__lowerCamelCase=False,__lowerCamelCase=128,__lowerCamelCase=16,__lowerCamelCase=False,__lowerCamelCase=True,__lowerCamelCase=0.05,__lowerCamelCase=10,__lowerCamelCase=2,__lowerCamelCase=0.0,__lowerCamelCase=10,__lowerCamelCase=0,__lowerCamelCase=320,__lowerCamelCase=2,__lowerCamelCase=0.1,__lowerCamelCase=100,__lowerCamelCase=256,__lowerCamelCase=256,__lowerCamelCase=0.1,__lowerCamelCase="mean",__lowerCamelCase=False,__lowerCamelCase=False,__lowerCamelCase=256,__lowerCamelCase=80,__lowerCamelCase=0,__lowerCamelCase=1,__lowerCamelCase=2,__lowerCamelCase=0.5,**__lowerCamelCase,):
super().__init__(**__lowerCamelCase,pad_token_id=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase )
A__ = hidden_size
A__ = feat_extract_norm
A__ = feat_extract_activation
A__ = list(__lowerCamelCase )
A__ = list(__lowerCamelCase )
A__ = list(__lowerCamelCase )
A__ = conv_bias
A__ = num_conv_pos_embeddings
A__ = num_conv_pos_embedding_groups
A__ = len(self.conv_dim )
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = num_attention_heads
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = feat_proj_dropout
A__ = final_dropout
A__ = layerdrop
A__ = layer_norm_eps
A__ = initializer_range
A__ = num_ctc_classes
A__ = vocab_size
A__ = do_stable_layer_norm
A__ = use_weighted_layer_sum
A__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ = apply_spec_augment
A__ = mask_time_prob
A__ = mask_time_length
A__ = mask_time_min_masks
A__ = mask_feature_prob
A__ = mask_feature_length
A__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A__ = num_codevectors_per_group
A__ = num_codevector_groups
A__ = contrastive_logits_temperature
A__ = feat_quantizer_dropout
A__ = num_negatives
A__ = codevector_dim
A__ = proj_codevector_dim
A__ = diversity_loss_weight
# ctc loss
A__ = ctc_loss_reduction
A__ = ctc_zero_infinity
# pretraining loss
A__ = replace_prob
@property
def UpperCamelCase ( self ):
return functools.reduce(operator.mul,self.conv_stride,1 )
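The closing property computes the feature extractor's total downsampling factor as the product of the convolution strides. With the default strides above:

```python
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame
```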
| 39 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : List[str] ,lowercase_ : Any=1_0_0 ,lowercase_ : str=1_3 ,lowercase_ : Any=3_0 ,lowercase_ : Optional[int]=2 ,lowercase_ : Dict=3 ,lowercase_ : Optional[int]=True ,lowercase_ : Union[str, Any]=True ,lowercase_ : Optional[Any]=3_2 ,lowercase_ : List[Any]=5 ,lowercase_ : Any=4 ,lowercase_ : Optional[int]=3_7 ,lowercase_ : List[Any]="gelu" ,lowercase_ : Dict=0.1 ,lowercase_ : List[Any]=0.1 ,lowercase_ : int=1_0 ,lowercase_ : int=0.02 ,lowercase_ : List[str]=3 ,):
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : List[Any] = batch_size
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : Optional[int] = patch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Optional[Any] = use_labels
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Any = (image_size // patch_size) ** 2
lowerCAmelCase__ : str = num_patches + 1
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : List[str] = BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase_ ,initializer_range=self.initializer_range ,)
return config, pixel_values, labels
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : str ,lowercase_ : Dict ,lowercase_ : Any ):
lowerCAmelCase__ : Any = FlaxBeitModel(config=lowercase_ )
lowerCAmelCase__ : str = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Dict ,lowercase_ : Any ,lowercase_ : Union[str, Any] ,lowercase_ : Any ):
lowerCAmelCase__ : Optional[int] = FlaxBeitForMaskedImageModeling(config=lowercase_ )
lowerCAmelCase__ : str = model(lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowerCAmelCase ( self : List[str] ,lowercase_ : str ,lowercase_ : List[str] ,lowercase_ : str ):
lowerCAmelCase__ : Dict = self.type_sequence_label_size
lowerCAmelCase__ : Tuple = FlaxBeitForImageClassification(config=lowercase_ )
lowerCAmelCase__ : List[str] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : Tuple = FlaxBeitForImageClassification(lowercase_ )
lowerCAmelCase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = model(lowercase_ )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Tuple = FlaxBeitModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self ,config_class=lowercase_ ,has_text_modality=lowercase_ ,hidden_size=3_7 )
def __lowerCAmelCase ( self : Dict ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[int] ):
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[str] = model_class(lowercase_ )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase_ )
def __lowerCAmelCase ( self : Union[str, Any] ):
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ : Union[str, Any] = self._prepare_for_class(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Dict = model_class(lowercase_ )
@jax.jit
def model_jitted(lowercase_ : Dict ,**lowercase_ : List[Any] ):
return model(pixel_values=lowercase_ ,**lowercase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase__ : Any = model_jitted(**lowercase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase__ : int = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) ,len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ ,lowercase_ ):
self.assertEqual(jitted_output.shape ,output.shape )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def __lowerCAmelCase ( self : List[str] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
lowerCAmelCase__ : List[Any] = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(lowercase_ )
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : Dict ):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Tuple = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
lowerCAmelCase__ : List[str] = self.default_image_processor
lowerCAmelCase__ : Optional[int] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=lowercase_ ,return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
lowerCAmelCase__ : str = np.ones((1, 1_9_6) ,dtype=lowercase_ )
# forward pass
lowerCAmelCase__ : Dict = model(pixel_values=lowercase_ ,bool_masked_pos=lowercase_ )
lowerCAmelCase__ : str = outputs.logits
# verify the logits
lowerCAmelCase__ : Any = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape ,lowercase_ )
lowerCAmelCase__ : str = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,lowercase_ ,atol=1E-2 ) )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : List[str] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : Optional[Any] = prepare_img()
lowerCAmelCase__ : Any = image_processor(images=lowercase_ ,return_tensors='''np''' )
# forward pass
lowerCAmelCase__ : List[Any] = model(**lowercase_ )
lowerCAmelCase__ : int = outputs.logits
# verify the logits
lowerCAmelCase__ : Dict = (1, 1_0_0_0)
self.assertEqual(logits.shape ,lowercase_ )
lowerCAmelCase__ : Dict = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] ,lowercase_ ,atol=1E-4 ) )
lowerCAmelCase__ : Union[str, Any] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() ,lowercase_ )
@slow
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Tuple = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
lowerCAmelCase__ : Union[str, Any] = self.default_image_processor
lowerCAmelCase__ : str = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=lowercase_ ,return_tensors='''np''' )
# forward pass
lowerCAmelCase__ : Tuple = model(**lowercase_ )
lowerCAmelCase__ : int = outputs.logits
# verify the logits
lowerCAmelCase__ : Optional[int] = (1, 2_1_8_4_1)
self.assertEqual(logits.shape ,lowercase_ )
lowerCAmelCase__ : Union[str, Any] = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] ,lowercase_ ,atol=1E-4 ) )
lowerCAmelCase__ : Optional[int] = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() ,lowercase_ )
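The jitted/non-jitted comparison in the test above is the standard pattern for verifying that a function traces cleanly under jax.jit. The same structure on a trivial function:

```python
import jax
import jax.numpy as jnp

def f(x):
    return jnp.tanh(x) * 2.0

jitted = jax.jit(f)
x = jnp.ones((2, 3))
assert jnp.allclose(jitted(x), f(x))
with jax.disable_jit():  # falls back to eager execution
    assert jnp.allclose(jitted(x), f(x))
```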
| 106 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( numa , numb ):
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
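Why the XOR test works: in two's complement the sign lives in the top bit, and XOR sets that bit exactly when the operands' sign bits differ, making the result negative. A quick check, restated with readable names:

```python
def different_signs(a: int, b: int) -> bool:
    return (a ^ b) < 0

assert different_signs(3, -7) and different_signs(-1, 1)
assert not different_signs(4, 9) and not different_signs(-2, -5)
```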
| 106 | 1 |
"""simple docstring"""
import math
import sys
def _UpperCAmelCase ( __lowerCamelCase : int ) -> int:
if number != int(a__ ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
_snake_case = [-1] * (number + 1)
_snake_case = 0
for i in range(1 , number + 1 ):
_snake_case = sys.maxsize
_snake_case = int(math.sqrt(a__ ) )
for j in range(1 , root + 1 ):
_snake_case = 1 + answers[i - (j**2)]
_snake_case = min(a__ , a__ )
_snake_case = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
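A compact restatement of the recurrence above for a quick sanity check (12 = 4 + 4 + 4, so the answer is three squares):

```python
import math

def min_squares(n: int) -> int:
    answers = [0] * (n + 1)
    for i in range(1, n + 1):
        answers[i] = 1 + min(
            answers[i - j * j] for j in range(1, math.isqrt(i) + 1)
        )
    return answers[n]

print(min_squares(12))  # 3
```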
| 370 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowerCAmelCase__ :
__a = 42
__a = 42
class lowerCAmelCase__ :
def __init__( self : int , _lowerCamelCase : int ):
_snake_case = [[] for _ in range(_lowerCamelCase )]
_snake_case = size
def __getitem__( self : Optional[int] , _lowerCamelCase : int ):
return iter(self._graph[vertex] )
@property
def lowercase ( self : Optional[int] ):
return self._size
def lowercase ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(_lowerCamelCase , _lowerCamelCase ) )
def lowercase ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int ):
_snake_case = deque([start_vertex] )
_snake_case = [None] * self.size
_snake_case = 0
while queue:
_snake_case = queue.popleft()
_snake_case = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_snake_case = current_distance + edge.weight
_snake_case = distances[edge.destination_vertex]
if (
isinstance(_lowerCamelCase , _lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
_snake_case = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
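The deque discipline above is 0-1 BFS: weight-0 edges push to the front, weight-1 edges to the back, so vertices come off the deque in non-decreasing distance order without a priority queue. A simplified self-contained sketch (it updates distances eagerly rather than using the snippet's lazy-deletion check):

```python
from collections import deque

def zero_one_bfs(adj, start, goal):
    # adj[u] is a list of (v, w) pairs with w in {0, 1}
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist[goal]

adj = [[(1, 0), (3, 1)], [(2, 1)], [], [(2, 0)]]
print(zero_one_bfs(adj, 0, 2))  # 1
```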
| 40 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ""
SCREAMING_SNAKE_CASE_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
SCREAMING_SNAKE_CASE_ = None # compression type in fsspec. ex: "gzip"
    SCREAMING_SNAKE_CASE_ = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowerCAmelCase__ = "", lowerCAmelCase__ = None, lowerCAmelCase__ = None, **lowerCAmelCase__) -> Optional[Any]:
super().__init__(self, **lowerCAmelCase__)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case_ = fsspec.open(
lowerCAmelCase__, mode='rb', protocol=lowerCAmelCase__, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {}), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case_ = os.path.basename(self.file.path.split('::')[0])
snake_case_ = (
self.compressed_name[: self.compressed_name.rindex('.')]
if '''.''' in self.compressed_name
else self.compressed_name
)
snake_case_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Tuple:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCAmelCase__).lstrip('/')
def a_ ( self) -> Any:
if self.dir_cache is None:
snake_case_ = {**self.file.fs.info(self.file.path), '''name''': self.uncompressed_name}
snake_case_ = {f['''name''']: f}
def a_ ( self, lowerCAmelCase__) -> Tuple:
return self.file.open().read()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "rb", lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> List[Any]:
snake_case_ = self._strip_protocol(lowerCAmelCase__)
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'')
return self.file.open()
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "bz2"
SCREAMING_SNAKE_CASE_ = "bz2"
SCREAMING_SNAKE_CASE_ = ".bz2"
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "gzip"
SCREAMING_SNAKE_CASE_ = "gzip"
SCREAMING_SNAKE_CASE_ = ".gz"
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "lz4"
SCREAMING_SNAKE_CASE_ = "lz4"
SCREAMING_SNAKE_CASE_ = ".lz4"
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "xz"
SCREAMING_SNAKE_CASE_ = "xz"
SCREAMING_SNAKE_CASE_ = ".xz"
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "zstd"
SCREAMING_SNAKE_CASE_ = "zstd"
SCREAMING_SNAKE_CASE_ = ".zst"
def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = "rb", lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = DEFAULT_BLOCK_SIZE, **lowerCAmelCase__, ) -> str:
super().__init__(
fo=lowerCAmelCase__, mode=lowerCAmelCase__, target_protocol=lowerCAmelCase__, target_options=lowerCAmelCase__, block_size=lowerCAmelCase__, **lowerCAmelCase__, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case_ = self.file.__enter__
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = file_
def __enter__( self) -> List[str]:
self._file.__enter__()
return self
def __exit__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]:
self._file.__exit__(*lowerCAmelCase__, **lowerCAmelCase__)
def __iter__( self) -> Any:
return iter(self._file)
def a_ ( self) -> List[str]:
return next(self._file)
def __getattr__( self, lowerCAmelCase__) -> List[Any]:
return getattr(self._file, lowerCAmelCase__)
def fixed_enter(*lowerCAmelCase__, **lowerCAmelCase__):
return WrappedFile(_enter(*lowerCAmelCase__, **lowerCAmelCase__))
snake_case_ = fixed_enter
| 69 |
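The subclasses above each bind one codec to fsspec's compression layer. A hedged sketch of the plain fsspec pattern they build on (standard fsspec usage; the file name is illustrative):

import gzip

import fsspec

# write a small gzip file, then read it back through fsspec's compression layer
with gzip.open("example.txt.gz", "wt") as f:
    f.write("hello compression\n")

with fsspec.open("example.txt.gz", mode="rt", compression="gzip") as f:
    print(f.read())  # hello compression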
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 263 | 0 |
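For reference, the iterator-streaming pattern these tests exercise reduces to a few lines: generation runs on a background thread and the streamer yields decoded text chunks on the main thread. A hedged sketch using the same tiny test checkpoint:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
thread = Thread(
    target=model.generate,
    kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer},
)
thread.start()
generated = "".join(new_text for new_text in streamer)  # consume chunks as they arrive
thread.join()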
'''simple docstring'''
from math import pow, sqrt
def a ( *__a ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = len(A_ ) > 0 and all(value > 0.0 for value in values )
return result
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A_ , A_ )
else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
| 358 |
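As a sanity check on Graham's law as encoded above (rate1/rate2 = sqrt(M2/M1)), a small worked example comparing hydrogen and oxygen; the molar masses are standard reference values, not taken from the snippet:

from math import sqrt

molar_mass_h2 = 2.016  # g/mol, H2
molar_mass_o2 = 32.00  # g/mol, O2

# the lighter gas effuses faster: rate(H2) / rate(O2) = sqrt(M(O2) / M(H2))
ratio = sqrt(molar_mass_o2 / molar_mass_h2)
print(round(ratio, 4))  # ~3.9841 -> hydrogen effuses roughly 4x faster than oxygen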
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__snake_case = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__snake_case = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = PRETRAINED_INIT_CONFIGURATION
_a = ['input_ids', 'attention_mask']
_a = DistilBertTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
UpperCamelCase__ :int = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
UpperCamelCase__ :Optional[Any] = do_lower_case
UpperCamelCase__ :Optional[Any] = strip_accents
UpperCamelCase__ :List[Any] = tokenize_chinese_chars
UpperCamelCase__ :Any = normalizer_class(**UpperCamelCase_ )
UpperCamelCase__ :int = do_lower_case
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :List[str] = [self.sep_token_id]
UpperCamelCase__ :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 219 | 0 |
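The two helpers above implement the standard BERT-style packing: a single sequence becomes [CLS] A [SEP] with all-zero segment ids, and a pair becomes [CLS] A [SEP] B [SEP] with ones over the second segment. A hedged illustration with hand-built lists, not output of the class:

tokens_a = ["hello", "world"]
tokens_b = ["again"]

pair = ["[CLS]", *tokens_a, "[SEP]", *tokens_b, "[SEP]"]
token_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

assert pair == ["[CLS]", "hello", "world", "[SEP]", "again", "[SEP]"]
assert token_type_ids == [0, 0, 0, 0, 1, 1]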
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
UpperCamelCase = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def __lowerCamelCase ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
UpperCamelCase = parser.parse_args()
if args.check_lib:
UpperCamelCase = importlib.import_module('''transformers''')
UpperCamelCase = Path(transformers_module.__file__).parent
else:
UpperCamelCase = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 306 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase_ ( _A : Optional[int] , _A : bool = True , _A : float = math.inf , _A : float = -math.inf , _A : float = math.inf , _A : float = -math.inf , _A : bool = False , _A : float = 100 , _A : float = 0.01 , _A : float = 1 , ):
"""simple docstring"""
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Optional[Any] = search_prob
lowerCamelCase__ : List[str] = start_temperate
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Optional[Any] = None
while not search_end:
lowerCamelCase__ : Optional[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase__ : List[Any] = current_state
scores.append(_A )
iterations += 1
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor that we can move to
lowerCamelCase__ : Any = random.randint(0 , len(_A ) - 1 ) # picking a random neighbor
lowerCamelCase__ : List[Any] = neighbors.pop(_A )
lowerCamelCase__ : Dict = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase__ : Dict = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase__ : Optional[Any] = picked_neighbor
else:
lowerCamelCase__ : str = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase__ : Optional[int] = picked_neighbor
lowerCamelCase__ : Any = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase__ : int = True
else:
lowerCamelCase__ : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_A ) , _A )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def lowercase_ ( _A : List[str] , _A : int ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
A : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A : Dict = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
A : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A : int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
def lowercase_ ( _A : int , _A : Any ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
A : Optional[int] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A : Union[str, Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
A : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
| 184 | 0 |
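To make the acceptance rule above concrete: a move that worsens the score by `change` is kept with probability e**(change / current_temp), so bad moves are accepted freely at high temperature and almost never once the system has cooled. A quick numeric check with illustrative values:

import math

change = -2.0  # candidate neighbor is worse by 2
for temperature in (100.0, 10.0, 1.0):
    probability = math.e ** (change / temperature)
    print(f"T={temperature:>5}: accept worse move with p={probability:.3f}")
# T=100.0 -> p=0.980, T=10.0 -> p=0.819, T=1.0 -> p=0.135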
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
if "model" in orig_key:
__UpperCAmelCase : int = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
__UpperCAmelCase : Optional[int] = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
__UpperCAmelCase : List[str] = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
__UpperCAmelCase : Any = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
__UpperCAmelCase : int = orig_key.split(""".""" )[0].split("""_""" )[-1]
__UpperCAmelCase : List[str] = orig_key.replace(f'transformer_{layer_num}' , f'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
__UpperCAmelCase : List[Any] = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
__UpperCAmelCase : Tuple = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
__UpperCAmelCase : Tuple = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
__UpperCAmelCase : List[str] = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
__UpperCAmelCase : Union[str, Any] = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
__UpperCAmelCase : Any = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
__UpperCAmelCase : Optional[int] = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
__UpperCAmelCase : Optional[Any] = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
__UpperCAmelCase : int = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
__UpperCAmelCase : List[Any] = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
__UpperCAmelCase : Optional[int] = """yoso.""" + orig_key
return orig_key
def lowercase_ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : str = orig_state_dict.pop(lowerCAmelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
__UpperCAmelCase : List[Any] = val
__UpperCAmelCase : Optional[Any] = orig_state_dict["""cls.predictions.decoder.bias"""]
__UpperCAmelCase : str = torch.arange(lowerCAmelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model_state_dict"""]
__UpperCAmelCase : List[str] = YosoConfig.from_json_file(lowerCAmelCase__ )
__UpperCAmelCase : str = YosoForMaskedLM(lowerCAmelCase__ )
__UpperCAmelCase : Dict = convert_checkpoint_helper(config.max_position_embeddings , lowerCAmelCase__ )
print(model.load_state_dict(lowerCAmelCase__ ) )
model.eval()
model.save_pretrained(lowerCAmelCase__ )
print(f'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_UpperCamelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 16 |
'''simple docstring'''
from collections.abc import Sequence
def lowercase_ ( lowerCAmelCase__ : Sequence[int] | None = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__UpperCAmelCase : Any = nums[0]
for i in range(1 , len(lowerCAmelCase__ ) ):
__UpperCAmelCase : Union[str, Any] = nums[i]
__UpperCAmelCase : List[Any] = max(lowerCAmelCase__ , ans + num , lowerCAmelCase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_UpperCamelCase = int(input('''Enter number of elements : ''').strip())
_UpperCamelCase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 16 | 1 |
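A short worked trace of the update rule above: at each step the best subsequence sum either stays as it is, extends by the current number, or restarts at it. Illustrative values:

nums = [2, -1, 3]
ans = nums[0]                       # 2
for num in nums[1:]:
    ans = max(ans, ans + num, num)  # 2 -> max(2, 1, -1)=2 -> max(2, 5, 3)=5
print(ans)  # 5, from the subsequence [2, 3]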
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( __UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ["""image_processor""", """feature_extractor"""]
UpperCamelCase_ : Union[str, Any] = """TvltImageProcessor"""
UpperCamelCase_ : Optional[Any] = """TvltFeatureExtractor"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]:
'''simple docstring'''
super().__init__(image_processor=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
A: Optional[Any] = image_processor
A: int = feature_extractor
def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : List[Any]=False , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> Any:
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
A: Any = None
if images is not None:
A: Optional[Any] = self.image_processor(UpperCamelCase__ , mask_pixel=UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
if images_mixed is not None:
A: List[str] = self.image_processor(UpperCamelCase__ , is_mixed=UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
if audio is not None:
A: str = self.feature_extractor(
UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , mask_audio=UpperCamelCase__ , **UpperCamelCase__ )
A: Dict = {}
if audio is not None:
output_dict.update(UpperCamelCase__ )
if images is not None:
output_dict.update(UpperCamelCase__ )
if images_mixed_dict is not None:
output_dict.update(UpperCamelCase__ )
return output_dict
@property
def _snake_case ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
A: Optional[int] = self.image_processor.model_input_names
A: Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 319 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_A = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
_A = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_A = dict(zip(vocab, range(len(vocab))))
_A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
_A = Path(tmpdirname)
_A = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
_A = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
_A = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
_A = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
_A = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
_A = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
_A = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
_A = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 278 | 0 |
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if 0 not in (x, y):
# We use the relation log10(x**y) = y*log10(x), where 10 is the base.
return y * math.logaa(a__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('''This should never happen''' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
a__ : List[Any] = '''Enter the base and the power separated by a comma: '''
a__ , a__ : Dict = map(int, input(prompt).split(''','''))
a__ , a__ : Union[str, Any] = map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
a__ : Tuple = res(xa, ya)
a__ : List[str] = res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 19 |
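The logarithm trick above compares huge powers without materializing them: since log10 is monotonic, x1**y1 > x2**y2 exactly when y1*log10(x1) > y2*log10(x2). A small hedged check:

import math

log_a = 100 * math.log10(2)  # log10(2**100) ~= 30.103
log_b = 30 * math.log10(10)  # log10(10**30) == 30.0
assert log_a > log_b
assert 2**100 > 10**30  # Python's big integers confirm the comparison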
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , *_lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ) ->int:
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = eval_examples
SCREAMING_SNAKE_CASE : Optional[int] = post_process_function
def __lowerCAmelCase ( self , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = "eval" , **_lowerCamelCase , ) ->Dict[str, float]:
SCREAMING_SNAKE_CASE : Any = gen_kwargs.copy()
SCREAMING_SNAKE_CASE : str = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
SCREAMING_SNAKE_CASE : Dict = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
SCREAMING_SNAKE_CASE : Any = gen_kwargs
SCREAMING_SNAKE_CASE : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE : str = self.get_eval_dataloader(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Optional[Any] = self.compute_metrics
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : Optional[Any] = time.time()
SCREAMING_SNAKE_CASE : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : Tuple = eval_loop(
_lowerCamelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , )
finally:
SCREAMING_SNAKE_CASE : Dict = compute_metrics
SCREAMING_SNAKE_CASE : Tuple = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE : Tuple = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
SCREAMING_SNAKE_CASE : Optional[int] = metrics.pop(_lowerCamelCase )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowerCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCamelCase )
return metrics
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase = "test" , **_lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : str = gen_kwargs.copy()
SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(_lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Dict = self.compute_metrics
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : List[str] = time.time()
SCREAMING_SNAKE_CASE : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : Any = eval_loop(
_lowerCamelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , )
finally:
SCREAMING_SNAKE_CASE : Optional[int] = compute_metrics
SCREAMING_SNAKE_CASE : List[Any] = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE : Tuple = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , '''predict''' )
SCREAMING_SNAKE_CASE : Dict = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
SCREAMING_SNAKE_CASE : List[Any] = metrics.pop(_lowerCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCamelCase )
| 19 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__magic_name__ = "src/transformers"
__magic_name__ = "docs/source/en/tasks"
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start prompt.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__magic_name__ = direct_transformers_import(TRANSFORMERS_PATH)
__magic_name__ = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__magic_name__ = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = TASK_GUIDE_TO_MODELS[task_guide]
__SCREAMING_SNAKE_CASE = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() )
__SCREAMING_SNAKE_CASE = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=False ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
__SCREAMING_SNAKE_CASE = get_model_list_for_task(UpperCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
""" to fix this.""" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__magic_name__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 100 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = str(id_ )
lowercase__ = None
lowercase__ = None
lowercase__ = []
lowercase__ = {} # {vertex:distance}
def __lt__( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.key < other.key
def __repr__( self ) -> Optional[Any]:
'''simple docstring'''
return self.id
def A__ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
self.neighbors.append(lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__ = weight
def _A ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase__ )
graph[b - 1].add_edge(graph[a - 1] , lowercase__ )
def _A ( lowercase__ , lowercase__ ):
lowercase__ = []
for u in graph:
lowercase__ = math.inf
lowercase__ = None
lowercase__ = 0
lowercase__ = graph[:]
while q:
lowercase__ = min(lowercase__ )
q.remove(lowercase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase__ = u
lowercase__ = u.edges[v.id]
for i in range(1 , len(lowercase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _A ( lowercase__ , lowercase__ ):
for u in graph:
lowercase__ = math.inf
lowercase__ = None
lowercase__ = 0
lowercase__ = list(lowercase__ )
hq.heapify(lowercase__ )
while h:
lowercase__ = hq.heappop(lowercase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase__ = u
lowercase__ = u.edges[v.id]
hq.heapify(lowercase__ )
for i in range(1 , len(lowercase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 0 |
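The snippet's three helper functions all collapsed onto the same placeholder name, so here is a hedged, self-contained sketch of the list-based Prim variant above, using a plain dict adjacency map (names and the example graph are illustrative):

import math

def prim(adjacency: dict[int, dict[int, int]], root: int) -> list[tuple[int, int]]:
    # adjacency[u][v] is the weight of edge (u, v); returns MST edges as (child, parent)
    key = {u: math.inf for u in adjacency}
    parent = {u: None for u in adjacency}
    key[root] = 0
    remaining = set(adjacency)
    edges = []
    while remaining:
        u = min(remaining, key=key.get)  # cheapest vertex not yet in the tree
        remaining.remove(u)
        if parent[u] is not None:
            edges.append((u, parent[u]))
        for v, weight in adjacency[u].items():
            if v in remaining and weight < key[v]:
                key[v] = weight
                parent[v] = u
    return edges

graph = {1: {2: 1, 3: 4}, 2: {1: 1, 3: 2}, 3: {1: 4, 2: 2}}
assert prim(graph, 1) == [(2, 1), (3, 2)]  # MST weight 1 + 2 = 3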
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : Dict = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''trajectory_transformer'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : int , lowercase_ : Tuple=100 , lowercase_ : Optional[int]=5 , lowercase_ : Optional[int]=1 , lowercase_ : int=1 , lowercase_ : Any=249 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=17 , lowercase_ : Tuple=25 , lowercase_ : Any=4 , lowercase_ : int=4 , lowercase_ : Optional[Any]=128 , lowercase_ : Tuple=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Union[str, Any]=0.00_06 , lowercase_ : List[Any]=512 , lowercase_ : Tuple=0.02 , lowercase_ : List[str]=1E-12 , lowercase_ : Union[str, Any]=1 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=1 , lowercase_ : Optional[int]=50256 , lowercase_ : Union[str, Any]=50256 , **lowercase_ : int , ):
lowercase_ : str = vocab_size
lowercase_ : Union[str, Any] = action_weight
lowercase_ : int = reward_weight
lowercase_ : List[Any] = value_weight
lowercase_ : Dict = max_position_embeddings
lowercase_ : List[str] = block_size
lowercase_ : Optional[Any] = action_dim
lowercase_ : Any = observation_dim
lowercase_ : Union[str, Any] = transition_dim
lowercase_ : int = learning_rate
lowercase_ : Tuple = n_layer
lowercase_ : Optional[int] = n_head
lowercase_ : Any = n_embd
lowercase_ : str = embd_pdrop
lowercase_ : int = attn_pdrop
lowercase_ : int = resid_pdrop
lowercase_ : Any = initializer_range
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : int = kaiming_initializer_range
lowercase_ : List[str] = use_cache
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
| 363 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ):
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ):
if audio_length_in_s is None:
lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate
lowercase_ : Any = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s greater than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowercase_ : List[Any] = int(lowercase_ )
if sample_size % down_scale_factor != 0:
lowercase_ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
""" process.""" )
lowercase_ : Any = int(lowercase_ )
lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
# set step values
self.scheduler.set_timesteps(lowercase_ , device=audio.device )
lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample
# 2. compute previous image: x_t -> x_t-1
lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase_ )
| 21 | 0 |
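The padding logic above rounds the requested sample count up to a multiple of 2**len(up_blocks) so every downsampling stage divides evenly, then trims the generated audio back at the end. A quick arithmetic sketch with illustrative numbers:

sample_rate = 16_000
audio_length_in_s = 0.0625   # 1_000 samples requested
down_scale_factor = 2 ** 6   # e.g. a UNet with six up blocks

sample_size = int(audio_length_in_s * sample_rate)  # 1000
if sample_size % down_scale_factor != 0:
    # round up to the next multiple so each halving stage divides evenly
    sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
print(sample_size)  # 1024; the pipeline later cuts the output back to 1000 samples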
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_a : Any = (low + high) // 2
_a , _a , _a : Tuple = max_subarray(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a , _a , _a : List[Any] = max_subarray(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ )
_a , _a , _a : Optional[int] = max_cross_sum(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[int, int, float]:
_a , _a : List[Any] = float('-inf' ), -1
_a , _a : List[str] = float('-inf' ), -1
_a : int | float = 0
for i in range(lowerCAmelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_a : int = summ
_a : List[str] = i
_a : List[str] = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_a : List[Any] = summ
_a : Optional[Any] = i
return max_left, max_right, (left_sum + right_sum)
def __lowerCamelCase ( lowerCAmelCase_ ) -> float:
_a : List[Any] = [randint(1 , lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ )]
_a : str = time.time()
max_subarray(lowerCAmelCase_ , 0 , input_size - 1 )
_a : Optional[Any] = time.time()
return end - start
def __lowerCamelCase ( ) -> None:
_a : Tuple = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
_a : Union[str, Any] = [time_max_subarray(lowerCAmelCase_ ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
print(lowerCAmelCase_ , '\t\t' , lowerCAmelCase_ )
plt.plot(lowerCAmelCase_ , lowerCAmelCase_ )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
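For orientation, the divide-and-conquer routine above returns a (low, high, sum) triple for the best contiguous run; its recursive calls refer to the original names max_subarray and max_cross_sum, which the mangled def lines shadow. Assuming those original names, the classic example gives:

arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
low, high, best = max_subarray(arr, 0, len(arr) - 1)
assert (low, high, best) == (3, 6, 6)  # the run [4, -1, 2, 1] sums to 6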
| 89 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Union[List[PIL.Image.Image], np.ndarray]
lowerCamelCase__ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : np.ndarray
lowerCamelCase__ : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 77 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( _lowercase , unittest.TestCase):
# TODO: is there an appropriate internal test set?
snake_case__ : List[str] = "ssube/stable-diffusion-x4-upscaler-onnx"
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : int=0 ):
"""simple docstring"""
_lowerCamelCase : Tuple = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase )
_lowerCamelCase : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCamelCase : Any = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = self.get_dummy_inputs()
_lowerCamelCase : Optional[Any] = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[int] = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.get_dummy_inputs()
_lowerCamelCase : str = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs()
_lowerCamelCase : Tuple = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Union[str, Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_lowerCamelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCamelCase : List[Any] = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[int] = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase):
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ort.SessionOptions()
_lowerCamelCase : List[str] = False
return options
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_lowerCamelCase : Any = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
_lowerCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = '''A fantasy landscape, trending on artstation'''
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : List[str] = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__lowerCAmelCase , output_type='''np''' , )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_lowerCamelCase : int = init_image.resize((1_2_8, 1_2_8) )
_lowerCamelCase : str = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
_lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = '''A fantasy landscape, trending on artstation'''
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : List[str] = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__lowerCAmelCase , output_type='''np''' , )
_lowerCamelCase : Union[str, Any] = output.images
_lowerCamelCase : Optional[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : str = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 175 | 0 |
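The WavLM `__init__` above wires `_LazyModule` so that heavy submodules load only on first attribute access. A stdlib-only sketch of the same deferred-import idea, using the documented `importlib` recipe (`json` stands in here for an expensive module):

import importlib.util
import sys


def lazy_import(name: str):
    # Defer module execution until the first attribute access (stdlib recipe).
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)
    return module


json_mod = lazy_import("json")          # nothing has actually been imported yet
print(json_mod.dumps({"lazy": True}))   # the module body executes here, on first use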
import os


def solution():
    """Sum the name scores in p022_names.txt (Project Euler problem 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution()) | 312 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }") | 312 | 1 |
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected | 371 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build the triangle row by row, each row derived from the previous one."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Faster variant: compute only the distinct left half of each row, then mirror it."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Time both generators across a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark() | 8 | 0 |
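Both generators above can be cross-checked against binomial coefficients, since row n (0-indexed) of Pascal's triangle is C(n, 0) through C(n, n). A quick independent check using the standard library:

from math import comb


def pascal_row(n: int) -> list[int]:
    """Row n (0-indexed) of Pascal's triangle via binomial coefficients."""
    return [comb(n, k) for k in range(n + 1)]


assert pascal_row(4) == [1, 4, 6, 4, 1]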
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path) | 6 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in s is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main() | 6 | 1 |
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of a letter in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the given 1-based (row, column) position."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode: write coordinates in two rows, read them off row-wise as letter pairs."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode by reversing the row-wise transposition done in encode."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
| 355 |
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 75 | 0 |
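A small aside on the coordinate lookup used by `letter_to_numbers` in the Bifid row above: `np.where` returns the row and column indices of the matching cell, which the cipher then shifts to 1-based coordinates. A self-contained illustration:

import numpy as np

# The same 5x5 square as above ('j' is folded into 'i').
square = np.array([list("abcde"), list("fghik"), list("lmnop"), list("qrstu"), list("vwxyz")])
row, col = np.where(square == "m")
# 'm' sits at 0-based (2, 1), so its 1-based cipher coordinates are (3, 2)
assert (int(row[0]) + 1, int(col[0]) + 1) == (3, 2)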
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # skip the scripts folder and hidden/private directories in place
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 99 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 349 | 0 |
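The `{axis_index: axis_name}` mappings returned by `AlbertOnnxConfig.inputs` above tell the ONNX exporter which axes stay dynamic. A toy export showing how such a mapping is consumed (the module and output file name are placeholders of ours, not from the source):

import torch
from torch import nn


class ToyModel(nn.Module):
    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        return input_ids.float().mean(dim=-1)


# Marking batch and sequence axes as dynamic lets the exported graph
# accept inputs of any batch size and sequence length.
torch.onnx.export(
    ToyModel(),
    (torch.zeros(1, 8, dtype=torch.long),),
    "toy.onnx",
    input_names=["input_ids"],
    output_names=["pooled"],
    dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}},
)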
import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test image used across integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 198 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending the leftover tail."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 198 | 1 |
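The interleaving done by `alternative_string_arrange` above can also be written with the standard library; an equivalent sketch:

from itertools import zip_longest


def alternate(first: str, second: str) -> str:
    """Interleave two strings, keeping the tail of the longer one."""
    return "".join(a + b for a, b in zip_longest(first, second, fillvalue=""))


assert alternate("AB", "XYZ") == "AXBYZ"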