| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename a flattened Flax key/tensor pair into the PyTorch layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
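For orientation, a minimal sketch of the linear-layer branch above: a Flax Dense kernel is stored as (in_features, out_features) while torch.nn.Linear.weight is (out_features, in_features), so the tensor is transposed and the key suffix switches from "kernel" to "weight". The key tuple and shapes below are invented for illustration; this reuses the torch import at the top of the file.

# Hypothetical key and shapes, purely illustrative
flax_kernel = torch.randn(512, 1024)  # Flax Dense kernel: (in_features, out_features)
new_key, new_tensor = rename_base_flax_keys(("encoder", "mlp", "wi", "kernel"), flax_kernel)
print(new_key)           # ('encoder', 'mlp', 'wi', 'weight')
print(new_tensor.shape)  # torch.Size([1024, 512]), transposed for torch.nn.Linear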
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    torch.save(new_current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
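As a sanity check on the sharding logic, this hedged sketch shows how the generated index file could be read back to locate the shard that holds a given parameter. It assumes shard_on_the_fly has already run on the directory below; the directory name is hypothetical.

# Inspect the index written by shard_on_the_fly
dump_path = "/tmp/switch-converted"  # assumed output directory
with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), encoding="utf-8") as f:
    index = json.load(f)
print(index["metadata"]["total_size"])                # total parameter bytes across shards
some_key = next(iter(index["weight_map"]))            # some renamed parameter key
print(some_key, "->", index["weight_map"][some_key])  # shard file that contains it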
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 325 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 91 | 0 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Build the affine transform that maps pt1 onto pt2 and apply it to the image."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
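To make the geometry explicit, here is a small hedged check, reusing the imports above, that cv2.getAffineTransform returns the 2x3 matrix M solving dst = M @ [x, y, 1] for the three point pairs; the points are the same ones used in the script.

# Verify the affine matrix reproduces the source-to-destination point mapping
src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
matrix = cv2.getAffineTransform(src, dst)                    # shape (2, 3)
homogeneous = np.hstack([src, np.ones((3, 1), np.float32)])  # append 1 to each point
print(np.allclose(homogeneous @ matrix.T, dst))              # True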
| 117 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes():
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
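A short usage sketch: the zenquotes.io endpoints return a JSON list of quote objects. The "q" (quote) and "a" (author) field names below follow the public API documentation, so treat them as an assumption if the API changes.

# Hedged example: print quote text and author from the random endpoint
for item in random_quotes():
    print(item["q"], "by", item["a"])  # field names assumed from the API docs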
| 117 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a = False
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = 'A painting of a squirrel eating a burger '
_A = torch.manual_seed(0 )
_A = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
_A = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = generator.manual_seed(0 )
_A = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCAmelCase_ ( self : Tuple ):
_A = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = 'A painting of a squirrel eating a burger '
_A = torch.manual_seed(0 )
_A = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_A = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
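One detail worth spelling out from the save/load test above: both pipeline runs must consume a generator seeded identically, otherwise the latents differ and the equality assertion is meaningless. A minimal sketch of the pattern (invented tensor size, CPU generator):

# Reseeding gives bit-identical noise, the precondition for comparing pipelines
g = torch.manual_seed(0)
noise_a = torch.randn(4, generator=g)
g = g.manual_seed(0)  # reset the same generator to the same seed
noise_b = torch.randn(4, generator=g)
assert torch.equal(noise_a, noise_b)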
| 315 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''input_values''', '''attention_mask''']
def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ):
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase )
_A = do_normalize
_A = return_attention_mask
_A = num_mel_bins
_A = hop_length
_A = win_length
_A = win_function
_A = frame_signal_scale
_A = fmin
_A = fmax
_A = mel_floor
_A = reduction_factor
_A = win_length * sampling_rate // 1_000
_A = hop_length * sampling_rate // 1_000
_A = optimal_fft_length(self.sample_size )
_A = (self.n_fft // 2) + 1
_A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase )
_A = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ):
if attention_mask is not None:
_A = np.array(_UpperCAmelCase , np.intaa )
_A = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(_UpperCAmelCase )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ):
_A = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ):
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
_A = None
if audio_target is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
_A = inputs_target['input_values']
_A = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
_A = decoder_attention_mask
return inputs
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ):
_A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
_A = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_A = speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [speech]
# needed to make pad() work on spectrogram inputs
_A = self.feature_size
# convert into correct format for padding
if is_target:
_A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech]
_A = BatchFeature({'input_values': features} )
_A = self.num_mel_bins
else:
_A = BatchFeature({'input_values': speech} )
_A = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
_A = feature_size_hack
# convert input values to correct format
_A = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_A = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_A = input_values.astype(np.floataa )
# convert attention_mask to correct format
_A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_A = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_A = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(_UpperCAmelCase )
return padded_inputs
def lowerCAmelCase_ ( self : Any ):
_A = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
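A hedged usage sketch for the feature extractor above, with random noise standing in for real speech and default constructor arguments assumed: the audio path yields a padded waveform batch for the encoder, while the audio_target path yields (frames, 80) log-mel targets.

# Illustrative only: random noise instead of real 16 kHz speech
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
speech = np.random.randn(16_000).astype(np.float32)  # 1 second at 16 kHz
inputs = extractor(audio=speech, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)   # (1, 16000) waveform for the encoder
targets = extractor(audio_target=speech, sampling_rate=16_000, return_tensors="np")
print(targets["input_values"].shape)  # (1, num_frames, 80) log-mel spectrogram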
| 315 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
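The _LazyModule pattern above defers the heavy torch/TF imports until an attribute is first touched. A stripped-down sketch of the same idea follows; it is not the transformers implementation, and the class and names are hypothetical.

# Minimal lazy-module sketch: resolve submodules on first attribute access
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find which submodule exports this name, import it on demand
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")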
| 361 |
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Check that all the custom (non-Python) files are present in the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
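For context, the check above reduces to a per-file existence test. A hedged one-liner showing the same operation for a single custom extension file; the build directory path is hypothetical.

# Equivalent standalone check for one custom extension file
from pathlib import Path

print((Path("build/lib/transformers") / "kernels/rwkv/wkv_cuda.cu").exists())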
| 311 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
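To make align_predictions concrete, a small hedged illustration with fabricated data: padded positions carry the CrossEntropyLoss ignore index (-100) and are dropped, so the returned per-example lists are ragged. The logits and labels below are invented.

# Tiny illustration of the alignment step with made-up data
import numpy as np

label_map = {0: "O", 1: "B-PER"}
predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq=3, labels=2)
label_ids = np.array([[0, 1, -100]])  # -100 marks a padded position
# After argmax over the label axis and filtering the -100 position:
#   preds_list  == [["O", "B-PER"]]
#   out_label_list == [["O", "B-PER"]]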
| 230 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
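As a pointer to what these tests exercise, here is a hedged standalone sketch of the two-stage BERT tokenization (basic splitting, then greedy longest-match WordPiece) using the classes imported above, on a toy vocabulary.

# Basic + WordPiece pipeline on a toy vocabulary
basic = BasicTokenizer(do_lower_case=True)
wordpiece = WordpieceTokenizer(
    vocab={"un": 0, "##want": 1, "##ed": 2, "runn": 3, "##ing": 4, "[UNK]": 5}, unk_token="[UNK]"
)
tokens = []
for word in basic.tokenize("UNwanted running"):
    tokens.extend(wordpiece.tokenize(word))
print(tokens)  # ['un', '##want', '##ed', 'runn', '##ing']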
| 230 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
"""simple docstring"""
A__ = torch.exp(lowercase_ )
A__ = torch.sum(lowercase_ , dim=1 ) # sum of exp(x_i)
A__ = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(lowercase_ ) - B / A
class UpperCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any]) ->int:
'''simple docstring'''
super().__init__()
A__ = config.output_attentions
A__ = config.output_hidden_states
A__ = nn.ModuleList([BertLayer(UpperCAmelCase__) for _ in range(config.num_hidden_layers)])
A__ = nn.ModuleList([BertHighway(UpperCAmelCase__) for _ in range(config.num_hidden_layers)])
A__ = [-1 for _ in range(config.num_hidden_layers)]
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str) ->List[Any]:
'''simple docstring'''
if (type(UpperCAmelCase__) is float) or (type(UpperCAmelCase__) is int):
for i in range(len(self.early_exit_entropy)):
A__ = x
else:
A__ = x
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Optional[int]) ->int:
'''simple docstring'''
A__ = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , ) ->List[str]:
'''simple docstring'''
A__ = ()
A__ = ()
A__ = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = layer_module(
UpperCAmelCase__ , UpperCAmelCase__ , head_mask[i] , UpperCAmelCase__ , UpperCAmelCase__)
A__ = layer_outputs[0]
if self.output_attentions:
A__ = all_attentions + (layer_outputs[1],)
A__ = (hidden_states,)
if self.output_hidden_states:
A__ = current_outputs + (all_hidden_states,)
if self.output_attentions:
A__ = current_outputs + (all_attentions,)
A__ = self.highway[i](UpperCAmelCase__)
# logits, pooled_output
if not self.training:
A__ = highway_exit[0]
A__ = entropy(UpperCAmelCase__)
A__ = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
A__ = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
A__ = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCAmelCase__ , i + 1)
else:
A__ = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = (hidden_states,)
if self.output_hidden_states:
A__ = outputs + (all_hidden_states,)
if self.output_attentions:
A__ = outputs + (all_attentions,)
A__ = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , UpperCAmelCase__ , )
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]) ->Optional[Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase__)
A__ = config
A__ = BertEmbeddings(UpperCAmelCase__)
A__ = DeeBertEncoder(UpperCAmelCase__)
A__ = BertPooler(UpperCAmelCase__)
self.init_weights()
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler)
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
return self.embeddings.word_embeddings
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, Any]) ->int:
'''simple docstring'''
A__ = value
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->List[Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCAmelCase__)
@add_start_docstrings_to_model_forward(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=None , ) ->str:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
elif input_ids is not None:
A__ = input_ids.size()
elif inputs_embeds is not None:
A__ = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''')
A__ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
A__ = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__)
if encoder_attention_mask is None:
A__ = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__)
if token_type_ids is None:
A__ = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
A__ = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
A__ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
A__ = encoder_attention_mask[:, None, None, :]
A__ = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
A__ = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
A__ = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers)
A__ = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__)
A__ = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
A__ = encoder_outputs[0]
A__ = self.pooler(UpperCAmelCase__)
A__ = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int) ->Tuple:
'''simple docstring'''
A__ = message
A__ = exit_layer # start from 1!
class UpperCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : str) ->Any:
'''simple docstring'''
super().__init__()
A__ = BertPooler(UpperCAmelCase__)
A__ = nn.Dropout(config.hidden_dropout_prob)
A__ = nn.Linear(config.hidden_size , config.num_labels)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Optional[int]) ->Optional[int]:
'''simple docstring'''
A__ = encoder_outputs[0]
A__ = self.pooler(UpperCAmelCase__)
# "return" pooler_output
# BertModel
A__ = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
A__ = bmodel_output[1]
A__ = self.dropout(UpperCAmelCase__)
A__ = self.classifier(UpperCAmelCase__)
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , UpperCAmelCase__ , )
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : List[str]) ->Tuple:
'''simple docstring'''
super().__init__(UpperCAmelCase__)
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeBertModel(UpperCAmelCase__)
A__ = nn.Dropout(config.hidden_dropout_prob)
A__ = nn.Linear(config.hidden_size , self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=-1 , UpperCAmelCase__ : Any=False , ) ->str:
'''simple docstring'''
A__ = self.num_layers
try:
A__ = self.bert(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
A__ = outputs[1]
A__ = self.dropout(UpperCAmelCase__)
A__ = self.classifier(UpperCAmelCase__)
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCAmelCase__)
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1) , labels.view(-1))
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCAmelCase__)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(UpperCAmelCase__)
if train_highway:
A__ = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
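# Hedged sketch of DeeBERT's entropy-based early exit at inference time: each
# highway classifier's prediction entropy is compared to a threshold, and the
# model returns the first sufficiently confident highway prediction. The
# threshold value and this entropy helper are illustrative, not the original's.
import torch
def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * probs.log()).sum(dim=-1)
entropy_threshold = 0.3
highway_logits = torch.tensor([[4.0, -2.0]])  # a confident binary prediction
if prediction_entropy(highway_logits).item() < entropy_threshold:
    print("exit early with this highway's logits")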
| 231 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
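# A minimal sketch of the deferred-import idea behind `_LazyModule`, written
# here with module-level `__getattr__` (PEP 562) purely for illustration; the
# real `_LazyModule` subclasses ModuleType and replaces the module object in
# sys.modules, but the observable effect is the same: submodules are only
# imported on first attribute access.
import importlib
def __getattr__(name):
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")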
| 231 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __lowercase ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCamelCase : int = 1_0_0_0_0
UpperCamelCase : Optional[List[str]] = None
UpperCamelCase : Optional[datasets.Features] = None
class __lowercase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ParquetConfig
def __A ( self ) -> Tuple:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A , (str, list, tuple) ):
lowerCamelCase = data_files
if isinstance(A , A ):
lowerCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase = [dl_manager.iter_files(A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
lowerCamelCase = []
for split_name, files in data_files.items():
if isinstance(A , A ):
lowerCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase = [dl_manager.iter_files(A ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(A ):
with open(A , """rb""" ) as f:
lowerCamelCase = datasets.Features.from_arrow_schema(pq.read_schema(A ) )
break
splits.append(datasets.SplitGenerator(name=A , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase = table_cast(A , self.info.features.arrow_schema )
return pa_table
def __A ( self , A ) -> Any:
'''simple docstring'''
lowerCamelCase = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(A ) ):
with open(A , """rb""" ) as f:
lowerCamelCase = pq.ParquetFile(A )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCamelCase = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'{file_idx}_{batch_idx}', self._cast_table(A )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(A )}: {e}' )
raise
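# Typical use of this Parquet builder through the public `datasets` API;
# "data/train.parquet" is a placeholder path, not a file shipped anywhere.
from datasets import load_dataset
dataset = load_dataset("parquet", data_files={"train": "data/train.parquet"})
print(dataset["train"].features)  # features inferred from the Arrow schema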
| 252 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCAmelCase : Optional[int] = random.Random()
def __lowerCamelCase ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any]=1.0 , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Dict=None ):
'''simple docstring'''
if rng is None:
lowerCamelCase = global_rng
lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=4_00 , A=20_00 , A=1 , A=0.0 , A=1_60_00 , A=True , A=True , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = min_seq_length
lowerCamelCase = max_seq_length
lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase = feature_size
lowerCamelCase = padding_value
lowerCamelCase = sampling_rate
lowerCamelCase = return_attention_mask
lowerCamelCase = do_normalize
def __A ( self ) -> Any:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A=False , A=False ) -> Any:
'''simple docstring'''
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
lowerCamelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
class __lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = WavaVecaFeatureExtractor
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = WavaVecaFeatureExtractionTester(self )
def __A ( self , A ) -> Any:
'''simple docstring'''
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) )
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCamelCase = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values
lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCamelCase = np.asarray(A )
lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values
lowerCamelCase = feat_extract(A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16_00, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feat_extract(A , padding=A , max_length=A , return_tensors="""np""" )
lowerCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = range(8_00 , 14_00 , 2_00 )
lowerCamelCase = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16_00, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feat_extract(A , max_length=A , padding=A )
lowerCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feat_extract(
A , truncation=A , max_length=10_00 , padding="""max_length""" , return_tensors="""np""" )
lowerCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feat_extract(
A , truncation=A , max_length=10_00 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feat_extract(
A , truncation=A , max_length=20_00 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
@require_torch
def __A ( self ) -> Optional[int]:
'''simple docstring'''
import torch
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = np.random.rand(1_00 ).astype(np.floataa )
lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def __A ( self ) -> str:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCamelCase = WavaVecaConfig.from_pretrained(A )
lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(A )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
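# Standalone numpy sketch of the property `_check_zero_mean_unit_variance`
# asserts: after Wav2Vec2-style normalization a window has (approximately)
# zero mean and unit variance. The 1e-7 epsilon mirrors the usual formulation
# but is an assumption here.
import numpy as np
raw = np.random.rand(800).astype(np.float32)
normalized = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)
assert abs(normalized.mean()) < 1e-3
assert abs(normalized.var() - 1) < 1e-3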
| 252 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
__a = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
super().__init__()
self.register_modules(
unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , movq=SCREAMING_SNAKE_CASE__ , )
lowercase : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if latents is None:
lowercase : Any = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase : Optional[int] = latents.to(SCREAMING_SNAKE_CASE__ )
lowercase : int = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase : int = torch.device(f"""cuda:{gpu_id}""" )
lowercase : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase : Dict = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=SCREAMING_SNAKE_CASE__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase : Optional[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase , lowercase : List[str] = cpu_offload_with_hook(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prev_module_hook=SCREAMING_SNAKE_CASE__ )
# We'll offload the last model manually.
lowercase : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 100 , SCREAMING_SNAKE_CASE__ = 4.0 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , ):
lowercase : List[str] = self._execution_device
lowercase : Any = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
lowercase : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowercase : Union[str, Any] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
lowercase : Tuple = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
lowercase : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE__ )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = self.scheduler.timesteps
lowercase : Tuple = self.unet.config.in_channels
lowercase , lowercase : Optional[int] = downscale_height_and_width(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.movq_scale_factor )
# create initial latent
lowercase : Optional[int] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowercase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : List[str] = {'''image_embeds''': image_embeds}
lowercase : Dict = self.unet(
sample=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , added_cond_kwargs=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
if do_classifier_free_guidance:
lowercase , lowercase : Any = noise_pred.split(latents.shape[1] , dim=1 )
lowercase , lowercase : List[str] = noise_pred.chunk(2 )
lowercase , lowercase : Dict = variance_pred.chunk(2 )
lowercase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase , lowercase : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase : List[str] = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , )[0]
# post-processing
lowercase : Dict = self.movq.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase : Optional[Any] = image * 0.5 + 0.5
lowercase : List[Any] = image.clamp(0 , 1 )
lowercase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase : Dict = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
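# The classifier-free guidance update from the denoising loop above, isolated
# as a tiny numpy sketch with toy values.
import numpy as np
guidance_scale = 4.0
noise_pred_uncond = np.array([0.1, -0.2])
noise_pred_text = np.array([0.3, 0.1])
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # [0.9 1. ]: pushed away from the unconditional prediction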
| 173 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """simple docstring"""
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
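# Sketch of the weight tying performed by `make_linear_from_emb`: the output
# projection reuses the token-embedding matrix, so no new parameters are
# introduced. Dimensions here are toy values.
import torch
from torch import nn
emb = nn.Embedding(10, 4)            # vocab_size=10, d_model=4
lin = nn.Linear(4, 10, bias=False)   # hidden states -> vocabulary logits
lin.weight.data = emb.weight.data    # weight of shape (10, 4) shared with emb
logits = lin(torch.randn(2, 4))      # (2, 10)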
| 173 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( numa , numaa ):
    # two ints have different signs exactly when their XOR is negative
    return numa ^ numaa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
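# Why the XOR test works: with two's-complement integers the sign is the top
# bit, so `a ^ b` is negative exactly when the signs of `a` and `b` differ
# (bitwise `^` binds tighter than `<` in Python).
assert (5 ^ -3) < 0       # different signs
assert not (5 ^ 3) < 0    # both positive
assert not (-5 ^ -3) < 0  # both negative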
| 106 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir = "." ):
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def md_prefix(i ):
    return f'{i * " "}*' if i else "\n##"
def print_path(old_path , new_path ):
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
    return new_path
def print_directory_md(top_dir = "." ):
    old_path = ''''''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
        filename = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(f'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
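# Quick check of the helpers above: depth 0 opens a new "##" section heading,
# while any positive depth renders as an indented "*" bullet.
assert md_prefix(0) == "\n##"
assert md_prefix(2).lstrip() == "*"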
| 106 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ =datasets.logging.get_logger(__name__)
UpperCamelCase__ ='\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
UpperCamelCase__ ='\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
UpperCamelCase__ ='\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase=False, __lowerCamelCase=True, __lowerCamelCase=False, __lowerCamelCase="dummy_doc" ):
_SCREAMING_SNAKE_CASE : Optional[int] = {doc: key_lines}
_SCREAMING_SNAKE_CASE : List[str] = {doc: sys_lines}
_SCREAMING_SNAKE_CASE : List[Any] = {}
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : Dict = 0
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = reader.get_doc_mentions(__lowerCamelCase, key_doc_lines[doc], __lowerCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_SCREAMING_SNAKE_CASE : List[Any] = reader.set_annotated_parse_trees(__lowerCamelCase, key_doc_lines[doc], __lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = reader.get_doc_mentions(__lowerCamelCase, sys_doc_lines[doc], __lowerCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_SCREAMING_SNAKE_CASE : Tuple = reader.set_annotated_parse_trees(__lowerCamelCase, key_doc_lines[doc], __lowerCamelCase, __lowerCamelCase )
if remove_nested:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = reader.remove_nested_coref_mentions(__lowerCamelCase, __lowerCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = reader.remove_nested_coref_mentions(__lowerCamelCase, __lowerCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_SCREAMING_SNAKE_CASE : Optional[Any] = reader.get_mention_assignments(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = reader.get_mention_assignments(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"Number of resulting singleton clusters in the key "
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"files, respectively" )
return doc_coref_infos
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = get_coref_infos(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {}
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Tuple = 0
for name, metric in metrics:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = evaluator.evaluate_documents(__lowerCamelCase, __lowerCamelCase, beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ), f"""Recall: {recall * 100:.2f}""", f""" Precision: {precision * 100:.2f}""", f""" F1: {fa * 100:.2f}""", )
if conll_subparts_num == 3:
_SCREAMING_SNAKE_CASE : Optional[Any] = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({"conll_score": conll} )
return output_scores
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
_SCREAMING_SNAKE_CASE : Any = line.split()[5]
if not parse_col == "-":
_SCREAMING_SNAKE_CASE : Optional[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
_SCREAMING_SNAKE_CASE : Dict = util.check_gold_parse_annotation(__lowerCamelCase )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_SCREAMING_SNAKE_CASE : List[str] = evaluate(
key_lines=__lowerCamelCase , sys_lines=__lowerCamelCase , metrics=__lowerCamelCase , NP_only=__lowerCamelCase , remove_nested=__lowerCamelCase , keep_singletons=__lowerCamelCase , min_span=__lowerCamelCase , )
        return score
| 325 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ ={
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 325 | 1 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number ):
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution(base = 80_0800 , degree = 80_0800 ):
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
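# The search condition in log space: p**q * q**p <= base**degree is equivalent
# to q*log2(p) + p*log2(q) <= degree*log2(base), which avoids building the
# astronomically large integers. Toy values below keep both sides computable.
from math import log2
p, q, base, degree = 2, 3, 10, 2
lhs = q * log2(p) + p * log2(q)
assert (lhs <= degree * log2(base)) == (p**q * q**p <= base**degree)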
| 301 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n , k ):
    result = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment , total_number , level , current_list , total_list , ):
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list ):
    for i in total_list:
        print(*i )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
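# Expected behaviour of the script above for n=4, k=2: every 2-element
# combination of {1, 2, 3, 4} in lexicographic order.
assert generate_all_combinations(4, 2) == [
    [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4],
]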
| 301 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class a :
"""simple docstring"""
def __init__( self : Any , snake_case : Tuple , snake_case : Optional[Any]=13 , snake_case : int=7 , snake_case : str=True , snake_case : Any=True , snake_case : List[Any]=True , snake_case : int=True , snake_case : Dict=99 , snake_case : Tuple=32 , snake_case : Dict=2 , snake_case : Dict=4 , snake_case : List[Any]=37 , snake_case : Optional[Any]="gelu" , snake_case : Tuple=0.1 , snake_case : int=0.1 , snake_case : Optional[int]=512 , snake_case : str=16 , snake_case : List[str]=2 , snake_case : Tuple=0.02 , snake_case : Dict=3 , snake_case : List[str]=4 , snake_case : Optional[Any]=None , ) -> int:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : str = 13
__UpperCAmelCase : int = 7
__UpperCAmelCase : int = True
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Any = 99
__UpperCAmelCase : Optional[Any] = 32
__UpperCAmelCase : Union[str, Any] = 2
__UpperCAmelCase : List[str] = 4
__UpperCAmelCase : Optional[int] = 37
__UpperCAmelCase : Optional[Any] = '''gelu'''
__UpperCAmelCase : Union[str, Any] = 0.1
__UpperCAmelCase : Dict = 0.1
__UpperCAmelCase : Tuple = 512
__UpperCAmelCase : Any = 16
__UpperCAmelCase : Tuple = 2
__UpperCAmelCase : str = 0.02
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : Optional[Any] = None
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : int = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : List[str] , snake_case : int , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Tuple , snake_case : int ) -> Optional[Any]:
__UpperCAmelCase : Any = TFRoFormerModel(config=snake_case )
__UpperCAmelCase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : Dict = [input_ids, input_mask]
__UpperCAmelCase : Tuple = model(snake_case )
__UpperCAmelCase : Tuple = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Dict , snake_case : int , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : int , snake_case : Any , snake_case : List[Any] ) -> Union[str, Any]:
__UpperCAmelCase : int = True
__UpperCAmelCase : Any = TFRoFormerForCausalLM(config=snake_case )
__UpperCAmelCase : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : Union[str, Any] = model(snake_case )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowerCamelCase__ ( self : Tuple , snake_case : List[str] , snake_case : int , snake_case : str , snake_case : Tuple , snake_case : Dict , snake_case : int , snake_case : List[str] ) -> List[Any]:
__UpperCAmelCase : List[str] = TFRoFormerForMaskedLM(config=snake_case )
__UpperCAmelCase : List[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : List[str] , snake_case : int , snake_case : Tuple , snake_case : List[Any] , snake_case : int , snake_case : List[str] , snake_case : Any ) -> Optional[int]:
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[Any] = TFRoFormerForSequenceClassification(config=snake_case )
__UpperCAmelCase : Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : List[str] , snake_case : Tuple , snake_case : List[str] , snake_case : List[str] , snake_case : Optional[int] , snake_case : Dict , snake_case : List[str] , snake_case : List[str] ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = self.num_choices
__UpperCAmelCase : Optional[int] = TFRoFormerForMultipleChoice(config=snake_case )
__UpperCAmelCase : Tuple = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : str = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__UpperCAmelCase : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : int , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[Any] , snake_case : Any , snake_case : Tuple ) -> str:
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : List[str] = TFRoFormerForTokenClassification(config=snake_case )
__UpperCAmelCase : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : str , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[Any] ) -> Dict:
__UpperCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=snake_case )
__UpperCAmelCase : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Dict ) -> int:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
def lowerCamelCase__ ( self : int , snake_case : Union[str, Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[str] ) -> Dict:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : Tuple = TFRoFormerModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : int ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(snake_case )
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : List[str] ) -> Dict:
__UpperCAmelCase : Optional[int] = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__UpperCAmelCase : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase : str = model(snake_case )[0]
# TODO Replace vocab size
__UpperCAmelCase : Union[str, Any] = 5_0000
__UpperCAmelCase : List[Any] = [1, 6, vocab_size]
self.assertEqual(output.shape , snake_case )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCAmelCase : Tuple = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1E-4 )
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 1E-4
def lowerCamelCase__ ( self : List[Any] ) -> int:
__UpperCAmelCase : Dict = tf.constant([[4, 10]] )
__UpperCAmelCase : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCAmelCase : Dict = emba(input_ids.shape )
__UpperCAmelCase : Tuple = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(snake_case , snake_case , atol=self.tolerance )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
__UpperCAmelCase : Tuple = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
__UpperCAmelCase : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCAmelCase : Any = emba.weight[:3, :5]
tf.debugging.assert_near(snake_case , snake_case , atol=self.tolerance )
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 1E-4
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
# 2,12,16,64
__UpperCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCAmelCase : Tuple = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCAmelCase : List[str] = embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCAmelCase : Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
snake_case , snake_case , snake_case )
__UpperCAmelCase : List[str] = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
__UpperCAmelCase : Optional[int] = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , snake_case , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , snake_case , atol=self.tolerance ) | 363 |
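The rotary test above builds a sinusoidal table whose first half is sines and second half cosines (see the expected embedding values), then rotates query and key slices with it. Below is a minimal NumPy sketch of that rotation, assuming the interleaved pair layout; the helper names and exact element order are illustrative, not the library's API.

import numpy as np


def sinusoidal_table(num_positions: int, dim: int) -> np.ndarray:
    # First half sin, second half cos, matching the expected values above.
    pos = np.arange(num_positions)[:, None]
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = pos * inv_freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)


def apply_rotary(x: np.ndarray, table: np.ndarray) -> np.ndarray:
    # Rotate each feature pair (x_{2i}, x_{2i+1}) by its position-dependent angle.
    sin, cos = np.split(table, 2, axis=-1)
    sin = np.repeat(sin, 2, axis=-1)
    cos = np.repeat(cos, 2, axis=-1)
    # Rotated half: (-x1, x0, -x3, x2, ...)
    x_rot = np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)
    return x * cos + x_rot * sin


q = np.arange(2 * 16 * 8, dtype=np.float64).reshape(2, 16, 8) / 100
print(apply_rotary(q, sinusoidal_table(16, 8))[0, :2, :4])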
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : Any , snake_case : Any , snake_case : Optional[int]=13 , snake_case : List[str]=7 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : int=True , snake_case : Tuple=True , snake_case : int=99 , snake_case : Any=16 , snake_case : Dict=36 , snake_case : Any=6 , snake_case : Dict=6 , snake_case : Dict=6 , snake_case : int=37 , snake_case : int="gelu" , snake_case : str=0.1 , snake_case : Any=0.1 , snake_case : Dict=512 , snake_case : List[Any]=16 , snake_case : Any=2 , snake_case : Any=0.02 , snake_case : Optional[int]=3 , snake_case : List[Any]=4 , snake_case : List[str]=None , ) -> Union[str, Any]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Optional[int] = embedding_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_hidden_groups
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : int = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase__ ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : int ) -> Optional[int]:
__UpperCAmelCase : List[Any] = AlbertModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
__UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case )
__UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : List[str] , snake_case : str , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : Optional[int] ) -> Optional[int]:
__UpperCAmelCase : str = AlbertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , sentence_order_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase__ ( self : Dict , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Dict = AlbertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple ) -> int:
__UpperCAmelCase : Optional[Any] = AlbertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Tuple , snake_case : List[str] , snake_case : Dict , snake_case : Optional[int] , snake_case : Dict , snake_case : int , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Any:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Any = AlbertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : str , snake_case : Dict , snake_case : Union[str, Any] , snake_case : List[str] ) -> int:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Optional[int] = AlbertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Dict , snake_case : int , snake_case : List[Any] , snake_case : List[Any] , snake_case : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = self.num_choices
__UpperCAmelCase : List[Any] = AlbertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Any = True
def lowerCamelCase__ ( self : Optional[int] , snake_case : Any , snake_case : Dict , snake_case : Tuple=False ) -> Optional[Any]:
__UpperCAmelCase : Any = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
__UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Dict ) -> int:
__UpperCAmelCase : List[Any] = AlbertModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase__ ( self : str ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*snake_case )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = AlbertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' )
__UpperCAmelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCAmelCase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case )[0]
__UpperCAmelCase : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case )
__UpperCAmelCase : int = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) ) | 240 | 0 |
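In the multiple-choice check above, each (batch, seq_len) tensor is repeated once per answer choice with unsqueeze/expand/contiguous before the forward pass. The same reshaping on a toy tensor:

import torch

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = torch.randint(0, 99, (batch_size, seq_length))
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(expanded.shape)  # torch.Size([2, 4, 5])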
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self : Optional[Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple=13 ,_UpperCAmelCase : Optional[int]=30 ,_UpperCAmelCase : int=2 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : int=True ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : Optional[Any]=32 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : List[str]=4 ,_UpperCAmelCase : Any=37 ,_UpperCAmelCase : List[str]="gelu" ,_UpperCAmelCase : Any=0.1 ,_UpperCAmelCase : Any=0.1 ,_UpperCAmelCase : Union[str, Any]=10 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=3 ,_UpperCAmelCase : Optional[Any]=0.6 ,_UpperCAmelCase : str=None ,):
_a : Union[str, Any] = parent
_a : Optional[Any] = batch_size
_a : Any = image_size
_a : int = patch_size
_a : int = num_channels
_a : Tuple = is_training
_a : Any = use_labels
_a : int = hidden_size
_a : Tuple = num_hidden_layers
_a : Any = num_attention_heads
_a : List[Any] = intermediate_size
_a : Dict = hidden_act
_a : Tuple = hidden_dropout_prob
_a : List[str] = attention_probs_dropout_prob
_a : List[Any] = type_sequence_label_size
_a : List[str] = initializer_range
_a : Optional[int] = mask_ratio
_a : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_a : int = (image_size // patch_size) ** 2
_a : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowercase ( self : Any ):
_a : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Dict = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ):
_a : Any = ViTMAEModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Any ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ):
_a : List[str] = ViTMAEForPreTraining(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Union[str, Any] = model(_UpperCAmelCase )
_a : List[str] = (self.image_size // self.patch_size) ** 2
_a : int = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_a : Optional[int] = 1
_a : Union[str, Any] = ViTMAEForPreTraining(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_UpperCAmelCase )
_a : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def __lowercase ( self : int ):
_a : List[str] = self.prepare_config_and_inputs()
_a , _a , _a : str = config_and_inputs
_a : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : List[str] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase : str = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : str = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[str] = False
def __lowercase ( self : Dict ):
_a : Any = ViTMAEModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
pass
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase ,nn.Linear ) )
def __lowercase ( self : Optional[int] ):
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_UpperCAmelCase )
_a : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : str = [*signature.parameters.keys()]
_a : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : Optional[int] ):
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Tuple ):
# make masks reproducible
np.random.seed(2 )
_a : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_a : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_a : Optional[int] = torch.from_numpy(_UpperCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_a : Optional[int] = pt_noise
super().check_pt_tf_models(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[str] ):
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_a : Any = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : str = outputs[0].cpu().numpy()
_a : Any = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
_a : Dict = model_class.from_pretrained(_UpperCAmelCase )
model.to(_UpperCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_a : str = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
# Make sure we don't have nans
_a : Any = after_outputs[0].cpu().numpy()
_a : int = 0
_a : Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase ,1E-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowercase ( self : str ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowercase ( self : Dict ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __lowercase ( self : Union[str, Any] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Optional[Any] ):
pass
@slow
def __lowercase ( self : Any ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Union[str, Any] = ViTMAEModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCamelCase ( ) -> Optional[int]:
_a : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __lowercase ( self : Optional[int] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_a : int = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(_UpperCAmelCase )
_a : Union[str, Any] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : List[Any] = image_processor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_a : Dict = ViTMAEConfig()
_a : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_a : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase ,noise=torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase ) )
# verify the logits
_a : int = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : int = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(_UpperCAmelCase ) ,atol=1E-4 ) )
| 89 |
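The tester above derives its expected sequence length from the mask ratio: ViTMAE keeps only the unmasked patch tokens plus [CLS], rounded up. Plugging in the tester defaults (image_size=30, patch_size=2, mask_ratio=0.6):

import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                       # 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))   # +1 for [CLS]
print(num_patches, seq_length)  # 225 91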
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make
        # its length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod() | 341 | 0 |
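A quick round-trip for the two helpers above, cross-checked against the standard library. This assumes the repaired function names base64_encode/base64_decode and runs in the same module:

import base64 as stdlib_b64

payload = b"Hello, World!"
encoded = base64_encode(payload)
assert encoded == stdlib_b64.b64encode(payload)
assert base64_decode(encoded) == payload
print(encoded)  # b'SGVsbG8sIFdvcmxkIQ=='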
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort by the supplied key (e.g. value per unit weight), best first, then
    # take items while they still fit in the budget.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 |
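A usage sketch for the greedy selector above, assuming the repaired names build_menu/greedy/Things and running in the same module: rank items by value per unit of weight, then take them while the budget allows.

food = ["Burger", "Pizza", "Coke", "Apple"]
value = [80, 100, 30, 20]
weight = [40, 60, 10, 5]
menu = build_menu(food, value, weight)
taken, total = greedy(menu, 15, Things.value_weight)
print(taken, total)  # [Things(Apple, 20, 5), Things(Coke, 30, 10)] 50.0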
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __a ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> int:
'''simple docstring'''
super().__init__()
lowercase__: Union[str, Any] = pad_token_id
lowercase__: List[str] = max_length
lowercase__: int = vocab
lowercase__: List[Any] = merges
lowercase__: str = BytePairTokenizer(lowerCAmelCase__ , lowerCAmelCase__ , sequence_length=lowerCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: Tuple = [' '.join(lowerCAmelCase__ ) for m in tokenizer.bpe_ranks.keys()]
lowercase__: List[Any] = tokenizer.get_vocab()
return cls(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__: int = GPTaTokenizer.from_pretrained(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
return cls.from_tokenizer(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
return cls(**lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = self.tf_tokenizer(lowerCAmelCase__ )
lowercase__: List[Any] = tf.ones_like(lowerCAmelCase__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowercase__: int = max_length if max_length is not None else self.max_length
if max_length is not None:
lowercase__ , lowercase__: List[Any] = pad_model_inputs(
lowerCAmelCase__ , max_seq_length=lowerCAmelCase__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 288 | 1 |
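The call path above tokenizes with keras-nlp's BytePairTokenizer, builds an all-ones attention mask, and relies on tensorflow_text's pad_model_inputs to pad or truncate both tensors to max_length. A plain-TF sketch of that padding step (illustrative only, not the tensorflow_text API):

import tensorflow as tf

token_ids = tf.ragged.constant([[5, 7, 9], [3]])
max_length, pad_token_id = 4, 0
input_ids = token_ids.to_tensor(default_value=pad_token_id, shape=[None, max_length])
attention_mask = tf.sequence_mask(token_ids.row_lengths(), maxlen=max_length, dtype=tf.int32)
print(input_ids.numpy())       # [[5 7 9 0] [3 0 0 0]]
print(attention_mask.numpy())  # [[1 1 1 0] [1 0 0 0]]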
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = ['pixel_values']
def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_pad
__lowerCamelCase = pad_size
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ):
__lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ )
__lowerCamelCase = (old_height // size + 1) * size - old_height
__lowerCamelCase = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ):
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_pad if do_pad is not None else self.do_pad
__lowerCamelCase = pad_size if pad_size is not None else self.pad_size
__lowerCamelCase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_pad:
__lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowerCamelCase = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 12 |
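Worked example of the pad rule above: each spatial side grows with symmetric padding to the next multiple of size. Note that, as written, the formula adds a full extra block even when a side is already a multiple of size:

size = 8
for old in (13, 16):
    pad_amount = (old // size + 1) * size - old
    print(old, "->", old + pad_amount)  # 13 -> 16, 16 -> 24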
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 336 | 0 |
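The streamer above flushes eagerly on CJK codepoints because those scripts are not space-separated, so the last-space heuristic never fires. A standalone check of a subset of the ranges it tests:

def is_cjk(cp: int) -> bool:
    return (
        0x4E00 <= cp <= 0x9FFF
        or 0x3400 <= cp <= 0x4DBF
        or 0x20000 <= cp <= 0x2A6DF
        or 0xF900 <= cp <= 0xFAFF
    )

print(is_cjk(ord("中")), is_cjk(ord("A")))  # True False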
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    # Classic fourth-order Runge-Kutta for y' = f(x, y) with y(xa) = ya.
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 |
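A usage sketch for the integrator above, assuming the repaired signature runge_kutta(f, ya, xa, x_end, h) and running in the same module: solve y' = y from y(0) = 1 and compare y(1) against e.

import math

y = runge_kutta(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
print(y[-1], math.e)  # both ~2.718281828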
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : str = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'levit'
def __init__( self , A=224 , A=3 , A=3 , A=2 , A=1 , A=16 , A=[128, 256, 384] , A=[4, 8, 12] , A=[4, 4, 4] , A=[16, 16, 16] , A=0 , A=[2, 2, 2] , A=[2, 2, 2] , A=0.0_2 , **A , ) -> int:
super().__init__(**A )
UpperCAmelCase : Any = image_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : Tuple = kernel_size
UpperCAmelCase : Optional[int] = stride
UpperCAmelCase : Dict = padding
UpperCAmelCase : List[Any] = hidden_sizes
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : Optional[int] = depths
UpperCAmelCase : Any = key_dim
UpperCAmelCase : str = drop_path_rate
UpperCAmelCase : List[Any] = patch_size
UpperCAmelCase : str = attention_ratio
UpperCAmelCase : Optional[Any] = mlp_ratio
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : int = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCamelCase_ ( __magic_name__ ):
lowercase = version.parse('1.11' )
@property
def _lowercase( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowercase( self ) -> float:
return 1e-4
| 338 | 1 |
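With the defaults above (hidden_sizes=[128, 256, 384], key_dim=[16, 16, 16]), each generated "Subsample" stage uses hidden_size // key_dim attention heads. A worked evaluation; the reading of the trailing entries (attention ratio 4, MLP ratio 2, stride 2) follows the LeViT paper and is an assumption here:

key_dim, hidden_sizes = [16, 16, 16], [128, 256, 384]
down_ops = [
    ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],  # 8 heads
    ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],  # 16 heads
]
print(down_ops)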
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = """fnet"""
def __init__(self :Dict , _UpperCamelCase :Optional[int]=3_2000 , _UpperCamelCase :Union[str, Any]=768 , _UpperCamelCase :Dict=12 , _UpperCamelCase :Union[str, Any]=3072 , _UpperCamelCase :Optional[Any]="gelu_new" , _UpperCamelCase :Any=0.1 , _UpperCamelCase :Union[str, Any]=512 , _UpperCamelCase :str=4 , _UpperCamelCase :Any=0.0_2 , _UpperCamelCase :Tuple=1e-12 , _UpperCamelCase :List[Any]=False , _UpperCamelCase :Optional[Any]=512 , _UpperCamelCase :Optional[int]=3 , _UpperCamelCase :str=1 , _UpperCamelCase :Optional[int]=2 , **_UpperCamelCase :Any , )-> Dict:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
__A = vocab_size
__A = max_position_embeddings
__A = hidden_size
__A = num_hidden_layers
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = initializer_range
__A = type_vocab_size
__A = layer_norm_eps
__A = use_tpu_fourier_optimizations
__A = tpu_short_seq_length
| 117 |
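FNet, which this config parameterizes, replaces self-attention with an unparameterized 2-D Fourier transform over the sequence and hidden dimensions, keeping only the real part. The mixing code lives elsewhere, so this is only a paper-level sketch:

import numpy as np

hidden_states = np.random.randn(4, 8)    # (seq_len, hidden_size)
mixed = np.fft.fft2(hidden_states).real  # token mixing, no learned weights
print(mixed.shape)  # (4, 8)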
import heapq
import sys
import numpy as np
snake_case__ : Tuple = tuple[int, int]
class A_ :
def __init__(self :Union[str, Any] )-> Union[str, Any]:
__A = []
__A = set()
def _lowerCAmelCase (self :List[str] )-> str:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _lowerCAmelCase (self :Any )-> Any:
return len(self.elements ) == 0
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Optional[int] )-> Optional[int]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_UpperCamelCase )
else:
# update
# print("update", item)
__A = []
((__A) , (__A)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__A) , (__A)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _lowerCAmelCase (self :int , _UpperCamelCase :int )-> int:
if item in self.set:
self.set.remove(_UpperCamelCase )
__A = []
((__A) , (__A)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__A) , (__A)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _lowerCAmelCase (self :Optional[Any] )-> int:
return self.elements[0][1]
def _lowerCAmelCase (self :List[str] )-> List[Any]:
((__A) , (__A)) = heapq.heappop(self.elements )
self.set.remove(_UpperCamelCase )
return (priority, item)
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> Any:
'''simple docstring'''
__A = np.array(lowerCamelCase )
__A = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> List[str]:
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> Any:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _a ( lowerCamelCase: TPos , lowerCamelCase: int , lowerCamelCase: TPos , lowerCamelCase: dict[TPos, float] ) -> Tuple:
'''simple docstring'''
__A = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def _a ( lowerCamelCase: Tuple , lowerCamelCase: Optional[int] , lowerCamelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
__A = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__A = '''*'''
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__A = '''#'''
__A = '''-'''
__A = back_pointer[goal]
while x != start:
((__A) , (__A)) = x
# print(x)
__A = '''-'''
__A = back_pointer[x]
__A = '''-'''
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__A = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=''' ''' )
__A = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def _a ( lowerCamelCase: TPos ) -> Optional[Any]:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _a ( lowerCamelCase: int , lowerCamelCase: Optional[int] , lowerCamelCase: Tuple , lowerCamelCase: Tuple , lowerCamelCase: Optional[int] , lowerCamelCase: str , lowerCamelCase: List[Any] , lowerCamelCase: Dict , ) -> Dict:
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__A) , (__A)) = s
__A = (x - 1, y)
__A = (x + 1, y)
__A = (x, y + 1)
__A = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__A = -1
__A = float('''inf''' )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__A = g_function[s] + 1
__A = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def _a ( ) -> str:
'''simple docstring'''
__A = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
snake_case__ : Union[str, Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
snake_case__ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
snake_case__ : List[str] = make_common_ground()
snake_case__ : int = blocks_blk
# hyper parameters
snake_case__ : Optional[int] = 1
snake_case__ : List[Any] = 1
snake_case__ : int = 20
snake_case__ : Optional[int] = 3 # one consistent and two other inconsistent
# start and end destination
snake_case__ : Tuple = (0, 0)
snake_case__ : Any = (n - 1, n - 1)
snake_case__ : List[Any] = 1
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos , lowerCamelCase: int ) -> Union[str, Any]:
'''simple docstring'''
__A = {start: 0, goal: float('''inf''' )}
__A = {start: -1, goal: -1}
__A = []
__A = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__A = []
__A = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__A , __A = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__A = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 117 | 1 |
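The heuristic table above registers a consistent Euclidean distance, the same value scaled down by the global t, and Manhattan distance for the 20x20 grid. Evaluating the first and last from start (0, 0) to goal (19, 19):

import numpy as np

start_pos, goal_pos = (0, 0), (19, 19)
euclid = float(np.linalg.norm(np.array(start_pos) - np.array(goal_pos)))
manhattan = abs(start_pos[0] - goal_pos[0]) + abs(start_pos[1] - goal_pos[1])
print(round(euclid, 3), manhattan)  # 26.87 38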
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__lowerCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__lowerCAmelCase = {"unk_token": "<unk>"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
__lowerCAmelCase = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__a , __a )
def snake_case ( self , **__a ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__a )
def snake_case ( self , **__a ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__a )
def snake_case ( self , **__a ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__lowerCAmelCase = OwlViTProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = processor(text=__a , return_tensors="np" )
__lowerCAmelCase = tokenizer(__a , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def test_processor( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_processor_with_text_list( self ):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name )
input_texts = ["cat", "nasa badge"]
inputs = processor(text=input_texts )
seq_length = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_processor_with_nested_text_list( self ):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name )
input_texts = [["cat", "nasa badge"], ["person"]]
inputs = processor(text=input_texts )
seq_length = 16
batch_size = len(input_texts )
num_max_text_queries = max([len(texts ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_processor_case( self ):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name )
input_texts = ["cat", "nasa badge"]
inputs = processor(text=input_texts )
seq_length = 16
input_ids = inputs["input_ids"]
predicted_ids = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def test_processor_case2( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
image_input = self.prepare_image_inputs()
query_input = self.prepare_image_inputs()
inputs = processor(images=image_input , query_images=query_input )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_tokenizer_decode( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids )
decoded_tok = tokenizer.batch_decode(predicted_ids )
self.assertListEqual(decoded_tok , decoded_processor )
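# --- Editor's addition: hedged usage sketch, not part of the original test file.
# End-to-end use of the processor the tests above exercise. Fetching the
# pretrained processor needs network access; the image is a synthetic placeholder.
def _owlvit_processor_demo():
    import numpy as np
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
    # Each text query is padded to the CLIP context length of 16 tokens, and the
    # leading dimension is batch_size * num_max_text_queries = 1 * 2.
    assert inputs["input_ids"].shape == (2, 16)
    assert "pixel_values" in inputs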
| 366 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
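# --- Editor's note (addition): how the lazy structure above behaves in use.
# _LazyModule defers the torch-gated submodule import until attribute access,
# so the package import itself stays cheap. Illustrative only (running this from
# inside this very __init__ would of course be circular):
#
#   from transformers.models import timesformer
#   cfg_cls = timesformer.TimesformerConfig   # cheap: config module only
#   mdl_cls = timesformer.TimesformerModel    # first access loads modeling_timesformer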
| 259 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MobileBertModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , token_type_ids=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MobileBertForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MobileBertForNextSentencePrediction(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MobileBertForPreTraining(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = MobileBertForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = MobileBertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = MobileBertForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_choices = self.num_choices
model = MobileBertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = MobileBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_mobilebert_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
def test_for_masked_lm( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
def test_for_next_sequence_prediction( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
def test_for_pretraining( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
def test_for_question_answering( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst ):
return torch.tensor(
tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_no_head( self ):
model = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(torch_device )
input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
output = model(input_ids )[0]
expected_shape = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[
[
[-2.4736526e07, 8.2691656e04, 1.6521838e05],
[-5.7541704e-01, 3.9056022e00, 4.4011507e00],
[2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
] , device=torch_device , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
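# --- Editor's addition: a minimal standalone version of the ratio-based check
# used above. With activations spanning roughly 1e0 to 1e8, a relative bound
# 1 - tol <= expected / actual <= 1 + tol stays meaningful where an absolute
# delta would be dominated by the largest entries.
def _within_relative_tolerance(expected, actual, tol=1e-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))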
| 124 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
"""simple docstring"""
config = FunnelConfig.from_json_file(config_file )
print(f'Building PyTorch model from configuration: {config}' )
model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
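# --- Editor's addition: example invocation. The flags are the ones defined by
# the parser above; the script filename and all paths are placeholders.
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel_pytorch/pytorch_model.bin \
#       --base_model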
| 77 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ):
"""simple docstring"""
x1 = x_start
fx1 = fnc(x_start )
length = 0.0
for _ in range(steps ):
# Approximates curve as a sequence of linear lines and sums their length
x2 = (x_end - x_start) / steps + x1
fx2 = fnc(x2 )
length += math.hypot(x2 - x1 , fx2 - fx1 )
# Increment step
x1 = x2
fx1 = fx2
return length
if __name__ == "__main__":
def f(x ):
"""simple docstring"""
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
i = 10
while i <= 100_000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
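# --- Editor's addition: quick sanity check. For the straight line f(x) = x on
# [0, 1] the exact arc length is sqrt(2), and the polyline approximation is
# exact for any step count because the curve is already linear.
assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9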
| 77 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name , val , spaces=0 ):
"""simple docstring"""
if name is None:
msg = None
else:
fmt = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
msg = fmt.format(name )
# Print and recurse (if needed).
if isinstance(val , dict ):
if msg is not None:
print(msg )
for k in val.keys():
recursive_print(k , val[k] , spaces + 2 )
elif isinstance(val , torch.Tensor ):
print(msg , ":" , val.size() )
else:
print(msg , ":" , val )
def fix_query_key_value_ordering(param , checkpoint_version , num_splits , num_heads , hidden_size ):
"""simple docstring"""
input_shape = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
param = param.view(*saved_shape )
param = param.transpose(0 , 2 )
param = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
param = param.view(*saved_shape )
param = param.transpose(0 , 1 ).contiguous()
param = param.view(*input_shape )
return param
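# --- Editor's addition: shape sanity check for the reordering above. For a v2+
# checkpoint with 2 heads, head size 4 and 3 splits (Q, K, V), the fix is a pure
# row permutation, so the output shape equals the input shape.
def _qkv_reordering_shape_check():
    param = torch.arange(24.0).view(24, 1)  # (num_heads * num_splits * hidden_size, 1)
    out = fix_query_key_value_ordering(param, checkpoint_version=2.0, num_splits=3, num_heads=2, hidden_size=4)
    assert out.shape == param.shape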
def convert_megatron_checkpoint(args , input_state_dict , config ):
"""simple docstring"""
output_state_dict = {}
# old versions did not store training args
ds_args = input_state_dict.get("args" , None )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
config.vocab_size = ds_args.padded_vocab_size
config.n_positions = ds_args.max_position_embeddings
config.n_embd = ds_args.hidden_size
config.n_layer = ds_args.num_layers
config.n_head = ds_args.num_attention_heads
config.n_inner = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
heads = config.n_head
# The hidden_size per head.
hidden_size_per_head = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
checkpoint_version = input_state_dict["checkpoint_version"]
else:
checkpoint_version = 0.0
# The model.
model = input_state_dict["model"]
# The language model.
lm = model["language_model"]
# The embeddings.
embeddings = lm["embedding"]
# The word embeddings.
word_embeddings = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
word_embeddings = word_embeddings[: config.vocab_size, :]
output_state_dict["transformer.wte.weight"] = word_embeddings
# The position embeddings.
pos_embeddings = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
n_positions = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
output_state_dict["transformer.wpe.weight"] = pos_embeddings
# The transformer.
transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
megatron_to_transformers = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
m = layer_re.match(key )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
layer_idx = int(m.group(1 ) )
# The name of the operation.
op_name = m.group(2 )
# Is it a weight or a bias?
weight_or_bias = m.group(3 )
# The name of the layer.
layer_name = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
ln_name = "ln_1" if op_name.startswith("input" ) else "ln_2"
output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
1 , 1 , n_positions , n_positions )
output_state_dict[layer_name + ".attn.bias"] = causal_mask
# Insert a "dummy" tensor for masked_bias.
masked_bias = torch.tensor(-1e4 , dtype=torch.float16 )
output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
out_val = out_val.transpose(0 , 1 ).contiguous()
# Store.
output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
# Store. No change of shape.
output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
out_name = megatron_to_transformers[op_name]
output_state_dict[layer_name + out_name + "weight"] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
out_name = megatron_to_transformers[op_name]
output_state_dict[layer_name + out_name + "bias"] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
# For the LM head, transformers wants the weight matrix tied to the word embeddings.
output_state_dict["lm_head.weight"] = word_embeddings
# It should be done!
return output_state_dict
def main():
"""simple docstring"""
parser = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=str , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=str , help="An optional config json file describing the pre-trained model." , )
args = parser.parse_args()
# Extract the basename.
basename = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
input_state_dict = torch.load(pytorch_dict , map_location="cpu" )
else:
input_state_dict = torch.load(args.path_to_checkpoint , map_location="cpu" )
ds_args = input_state_dict.get("args" , None )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
activation_function = "gelu_fast"
elif ds_args.openai_gelu:
activation_function = "gelu_new"
else:
activation_function = "gelu"
else:
# in the very early days this used to be "gelu_new"
activation_function = "gelu_new"
# Spell out all parameters in case the defaults change.
config = GPT2Config(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , )
else:
config = GPT2Config.from_json_file(args.config_file )
config.architectures = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(None , output_state_dict )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
tokenizer_type = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
tokenizer_model_name = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
tokenizer_model_name = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
tokenizer_model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
tokenizer_class = type(tokenizer ).__name__
config.tokenizer_class = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(basename )
# Save tokenizer based on args
print(F"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(basename )
# Store the state_dict to file.
output_checkpoint_file = os.path.join(basename , "pytorch_model.bin" )
print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
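# --- Editor's addition: example invocation (the checkpoint path is a
# placeholder; the flags and PYTHONPATH note come from the script above).
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       checkpoints/megatron_gpt2_345m.zip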
| 231 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig ):
model_type = "megatron-bert"
def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
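# --- Editor's addition: hedged usage sketch. A deliberately tiny configuration
# for quick experiments; every keyword below is a parameter of the __init__ above.
def _megatron_bert_config_demo():
    config = MegatronBertConfig(
        num_hidden_layers=2, hidden_size=128, num_attention_heads=4, intermediate_size=256
    )
    assert config.model_type == "megatron-bert"
    return config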
| 231 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def setUp( self ):
self.model_tester = FlaxAlbertModelTester(self )
@slow
def test_model_from_pretrained( self ):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("albert-base-v2" )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
def test_inference_no_head_absolute_embedding( self ):
model = FlaxAlbertModel.from_pretrained("albert-base-v2" )
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
output = model(input_ids , attention_mask=attention_mask )[0]
expected_shape = (1, 11, 768)
self.assertEqual(output.shape , expected_shape )
expected_slice = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 161 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method ):
"""simple docstring"""
if not is_accelerate_available():
return method
accelerate_version = version.parse(accelerate.__version__ ).base_version
if version.parse(accelerate_version ) < version.parse("0.17.0" ):
return method
def wrapper(self , *args , **kwargs ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *args , **kwargs )
return wrapper
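# --- Editor's addition: hedged usage sketch. The decorator is meant for entry
# points of modules that may carry an accelerate dispatch hook (self._hf_hook);
# on accelerate >= 0.17.0 the hook's pre_forward runs before the method body,
# which lets offloaded weights land on the right device first.
def _apply_forward_hook_demo():
    import torch

    class TinyEncoder(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.proj = torch.nn.Linear(4, 4)

        @apply_forward_hook
        def encode(self, x):
            # If a hook is attached, its pre_forward already ran at this point.
            return self.proj(x)

    return TinyEncoder().encode(torch.zeros(2, 4))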
| 161 | 1 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict ):
split_dict_yaml_list = split_dict._to_yaml_list()
assert len(split_dict_yaml_list ) == len(split_dict )
reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
split_info.dataset_name = None
# the split name of split_dict takes over the name of the split info object
split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name(split_info ):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
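# --- Editor's addition: the round trip the first test exercises, in miniature.
# _to_yaml_list/_from_yaml_list are the same private helpers the tests call.
def _split_dict_roundtrip_demo():
    sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
    reloaded = SplitDict._from_yaml_list(sd._to_yaml_list())
    assert reloaded["train"].num_examples == 42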
| 173 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
_INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name ):
name = _uppercase_uppercase_re.sub(r"\1_\2" , name )
name = _lowercase_uppercase_re.sub(r"\1_\2" , name )
return name.lower()
def snakecase_to_camelcase(name ):
name = _single_underscore_re.split(name )
name = [_multiple_underscores_re.split(n ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def filename_prefix_for_name(name ):
if os.path.basename(name ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(name )
def filename_prefix_for_split(name , split ):
if os.path.basename(name ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , split ):
raise ValueError(f'''Split name should match '{_split_re}' but got '{split}'.''' )
return f'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split(dataset_name , split , data_dir , filetype_suffix=None ):
prefix = filename_prefix_for_split(dataset_name , split )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
filepath = os.path.join(data_dir , prefix )
return f'''{filepath}*'''
def filenames_for_dataset_split(path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
prefix = filename_prefix_for_split(dataset_name , split )
prefix = os.path.join(path , prefix )
if shard_lengths:
num_shards = len(shard_lengths )
filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
if filetype_suffix:
filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
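# --- Editor's addition: worked examples for the helpers above; the expected
# outputs were traced by hand through the regexes at the top of the module.
def _naming_demo():
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert snakecase_to_camelcase("squad_v2") == "SquadV2"
    shards = filenames_for_dataset_split(
        "/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
    )
    assert shards == [
        "/data/squad-train-00000-of-00002.arrow",
        "/data/squad-train-00001-of-00002.arrow",
    ]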
| 173 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 325 |
def binomial_coefficient(n , r ):
c = [0 for i in range(r + 1 )]
# nc0 = 1
c[0] = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
j = min(i , r )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
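# --- Editor's addition: worked check. The loop implements the in-place Pascal's
# triangle recurrence C(i, j) = C(i-1, j) + C(i-1, j-1), updating each row from
# right to left in O(n*r) time and O(r) space; C(10, 5) = 252.
assert binomial_coefficient(n=10, r=5) == 252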
print(binomial_coefficient(n=10, r=5)) | 325 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
key_doc_lines = {doc: key_lines}
sys_doc_lines = {doc: sys_lines}
doc_coref_infos = {}
key_nested_coref_num = 0
sys_nested_coref_num = 0
key_removed_nested_clusters = 0
sys_removed_nested_clusters = 0
key_singletons_num = 0
sys_singletons_num = 0
key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
key_singletons_num += singletons_num
if NP_only or min_span:
key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
sys_singletons_num += singletons_num
if NP_only or min_span:
sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
if remove_nested:
nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
output_scores = {}
conll = 0
conll_subparts_num = 0
for name, metric in metrics:
recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += f1
conll_subparts_num += 1
output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": f1} )
logger.info(
name.ljust(10 ) , F"""Recall: {recall * 100:.2f}""" , F""" Precision: {precision * 100:.2f}""" , F""" F1: {f1 * 100:.2f}""" , )
if conll_subparts_num == 3:
conll = (conll / 3) * 100
logger.info(F"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def check_gold_parse_annotation(key_lines ):
has_gold_parse = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
parse_col = line.split()[5]
if not parse_col == "-":
has_gold_parse = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric ):
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=False ) -> Any:
"""simple docstring"""
__lowercase = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(_UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=_UpperCAmelCase , sys_lines=_UpperCAmelCase , metrics=_UpperCAmelCase , NP_only=_UpperCAmelCase , remove_nested=_UpperCAmelCase , keep_singletons=_UpperCAmelCase , min_span=_UpperCAmelCase , )
return score
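# A de-obfuscated sketch (names assumed) of the gold-parse check above: the
# first non-comment line with more than six columns decides whether gold
# parse annotation is present (its sixth column must differ from "-").
def check_gold_parse_annotation(key_lines):
    for line in key_lines:
        if line.startswith('#'):
            continue
        cols = line.split()
        if len(cols) > 6:
            return cols[5] != "-"
    return False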
| 325 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "transfo-xl"
lowerCAmelCase__ : int = ["mems"]
lowerCAmelCase__ : Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = []
self.cutoffs.extend(_UpperCAmelCase )
if proj_share_all_but_first:
__lowercase = [False] + [True] * len(self.cutoffs )
else:
__lowercase = [False] + [False] * len(self.cutoffs )
__lowercase = d_model
__lowercase = d_embed
__lowercase = d_head
__lowercase = d_inner
__lowercase = div_val
__lowercase = pre_lnorm
__lowercase = n_layer
__lowercase = n_head
__lowercase = mem_len
__lowercase = same_length
__lowercase = attn_type
__lowercase = clamp_len
__lowercase = sample_softmax
__lowercase = adaptive
__lowercase = dropout
__lowercase = dropatt
__lowercase = untie_r
__lowercase = init
__lowercase = init_range
__lowercase = proj_init_std
__lowercase = init_std
__lowercase = layer_norm_epsilon
super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 325 | 1 |
"""simple docstring"""
import math
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(SCREAMING_SNAKE_CASE_ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
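# A de-obfuscated sketch of the Malus's-law function above, keeping the same
# validation rules: transmitted intensity is I = I0 * cos^2(theta).
import math

def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)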
| 363 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
lowercase : List[Any] = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(_UpperCAmelCase )
else:
lowercase : str = sylvester(number - 1 )
lowercase : Union[str, Any] = num - 1
lowercase : List[Any] = num
return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
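# A runnable sketch of the Sylvester recursion above with the renamed
# identifiers restored: a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1.
def sylvester(number: int) -> int:
    assert isinstance(number, int), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    if number < 1:
        raise ValueError(f'The input value of [n={number}] has to be > 0')
    num = sylvester(number - 1)
    return (num - 1) * num + 1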
| 53 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class _lowercase (lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ = Features({"""audio""": Audio()} )
lowercase__ = Features({"""labels""": ClassLabel} )
lowercase__ = "audio"
lowercase__ = "labels"
def _lowerCamelCase ( self , snake_case__ ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __snake_case ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
UpperCamelCase_ = copy.deepcopy(self )
UpperCamelCase_ = self.label_schema.copy()
UpperCamelCase_ = features[self.label_column]
UpperCamelCase_ = label_schema
return task_template
@property
def _lowerCamelCase ( self ):
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
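# A hypothetical usage sketch (class and method names assumed from the
# datasets library): the method defined above aligns the template's label
# schema with the dataset's actual ClassLabel column.
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({'audio': Audio(), 'labels': ClassLabel(names=['yes', 'no'])})
template = AudioClassification(audio_column='audio', label_column='labels')
aligned = template.align_with_features(features)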
| 128 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
snake_case : List[Any] = datasets.logging.get_logger(__name__)
snake_case : List[str] = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
snake_case : Tuple = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files was developed by Leo Born.
'''
snake_case : str = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions from the key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Any="dummy_doc" ):
a__ = {doc: key_lines}
a__ = {doc: sys_lines}
a__ = {}
a__ = 0
a__ = 0
a__ = 0
a__ = 0
a__ = 0
a__ = 0
a__ , a__ = reader.get_doc_mentions(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
a__ = reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase )
a__ , a__ = reader.get_doc_mentions(__lowerCAmelCase , sys_doc_lines[doc] , __lowerCAmelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
a__ = reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase )
if remove_nested:
a__ , a__ = reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
a__ , a__ = reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
a__ = reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase )
a__ = reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase )
a__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'Number of resulting singleton clusters in the key '
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'files, respectively' )
return doc_coref_infos
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ):
a__ = get_coref_infos(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a__ = {}
a__ = 0
a__ = 0
for name, metric in metrics:
a__ , a__ , a__ = evaluator.evaluate_documents(__lowerCAmelCase , __lowerCAmelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
a__ = (conll / 3) * 1_0_0
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'conll_score': conll} )
return output_scores
def __lowercase ( __lowerCAmelCase : str ):
a__ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
a__ = line.split()[5]
if parse_col != "-":
a__ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) ,codebase_urls=['https://github.com/ns-moosavi/coval'] ,reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] ,)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Dict=True ,__snake_case :Union[str, Any]=False ,__snake_case :Dict=False ,__snake_case :Union[str, Any]=False ) -> Tuple:
a__ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
a__ = util.check_gold_parse_annotation(__snake_case )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
a__ = evaluate(
key_lines=__snake_case ,sys_lines=__snake_case ,metrics=__snake_case ,NP_only=__snake_case ,remove_nested=__snake_case ,keep_singletons=__snake_case ,min_span=__snake_case ,)
return score
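# A small sketch of the aggregation performed in evaluate() above: the CoNLL
# score is the mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100.
def conll_score(muc_f1, bcub_f1, ceafe_f1):
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100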
| 240 | 0 |
import math
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
if 0 not in (x, y):
# We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.logaa(_UpperCAmelCase )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowerCAmelCase : Tuple = """Enter the base and the power separated by a comma: """
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = map(int, input(prompt).split(""","""))
lowerCAmelCase , lowerCAmelCase : List[Any] = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowerCAmelCase : int = res(xa, ya)
lowerCAmelCase : Any = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 127 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCAmelCase : Optional[Any] = 50003
lowerCAmelCase : List[str] = 50002
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : int = PLBartTokenizer
_UpperCAmelCase : Any = None
_UpperCAmelCase : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : str):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: Tuple = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[int] = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
SCREAMING_SNAKE_CASE_: int = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_: Tuple = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 4 , lowerCAmelCase__)]
self.assertListEqual(lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "<mask>"])
SCREAMING_SNAKE_CASE_: Optional[int] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer(lowerCAmelCase__).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = PLBartTokenizer(lowerCAmelCase__ , language_codes="multi" , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_: int = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 7 , lowerCAmelCase__)]
self.assertListEqual(
lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
SCREAMING_SNAKE_CASE_: str = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = '''uclanlp/plbart-python-en_XX'''
_UpperCAmelCase : List[str] = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
_UpperCAmelCase : int = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
_UpperCAmelCase : Optional[Any] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict):
SCREAMING_SNAKE_CASE_: PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX")
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
return cls
def _SCREAMING_SNAKE_CASE ( self : List[str]):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0003)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Dict = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids)
SCREAMING_SNAKE_CASE_: Optional[Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE_: int = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [5_0004, 5_0001])
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = PLBartTokenizer.from_pretrained(lowerCAmelCase__)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase__)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
SCREAMING_SNAKE_CASE_: Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
SCREAMING_SNAKE_CASE_: Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt")
SCREAMING_SNAKE_CASE_: Any = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt")
SCREAMING_SNAKE_CASE_: List[Any] = targets["input_ids"]
SCREAMING_SNAKE_CASE_: List[Any] = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java")
self.assertEqual(
nested_simplify(lowerCAmelCase__) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 5_0003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0001,
} , )
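# A minimal usage sketch of the tokenizer configuration these tests exercise
# (checkpoint and kwargs taken from the test class above; requires network):
from transformers import PLBartTokenizer

tok = PLBartTokenizer.from_pretrained(
    'uclanlp/plbart-python-en_XX', language_codes='base', src_lang='python', tgt_lang='en_XX'
)
batch = tok(['def add(a,b):NEW_LINE_INDENTreturn a+b'], return_tensors='pt')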
| 127 | 1 |
"""simple docstring"""
from __future__ import annotations
def _UpperCAmelCase ( __lowerCamelCase : int | float | str , __lowerCamelCase : int | float | str ) -> list[str]:
if nth_term == "":
return [""]
_snake_case = int(__lowerCamelCase )
_snake_case = int(__lowerCamelCase )
_snake_case = []
for temp in range(int(__lowerCamelCase ) ):
series.append(f'''1 / {pow(temp + 1 , int(__lowerCamelCase ) )}''' if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input('Enter the last number (nth term) of the P-Series'))
UpperCAmelCase__ = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
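# A de-obfuscated sketch of the P-series builder above, with the renamed
# identifiers restored:
def p_series(nth_term, power):
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(nth_term):
        series.append(f"1 / {(temp + 1) ** power}" if series else "1")
    return series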
| 288 |
"""simple docstring"""
from math import pow
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , ) -> tuple[int, int]:
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
_snake_case = int(pow(__lowerCamelCase , __lowerCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
_snake_case , _snake_case = backtrack(
__lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
_snake_case , _snake_case = backtrack(
__lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase )
return current_sum, solutions_count
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(__lowerCamelCase , __lowerCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
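# A restructured sketch of the backtracking above: count the ways to write
# needed_sum as a sum of distinct natural numbers raised to `power`.
def count_power_sums(needed_sum: int, power: int, current: int = 1, current_sum: int = 0) -> int:
    if current_sum == needed_sum:
        return 1
    term = current ** power
    count = 0
    if current_sum + term <= needed_sum:  # include current**power and move on
        count += count_power_sums(needed_sum, power, current + 1, current_sum + term)
    if term < needed_sum:  # skip current**power and try the next base
        count += count_power_sums(needed_sum, power, current + 1, current_sum)
    return count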
| 288 | 1 |
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : int ) -> int:
while b:
A_ , A_ : List[str] = b, a % b
return a
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : int ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__lowerCAmelCase , a % b )
def __snake_case ( ) -> List[Any]:
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
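# The same two GCD variants as above, with the tuple unpacking restored:
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a

def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)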
| 353 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCAmelCase : Optional[Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCAmelCase : Tuple = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCAmelCase : List[str] = '''zero2'''
_lowerCAmelCase : Dict = '''zero3'''
_lowerCAmelCase : Tuple = [ZEROa, ZEROa]
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] ) -> Any:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
A_ : Dict = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
_lowerCAmelCase : List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
@parameterized.expand(snake_case , name_func=snake_case )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Tuple , snake_case :Tuple ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@require_torch_multi_gpu
@parameterized.expand(snake_case , name_func=snake_case )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Tuple , snake_case :Optional[Any] ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@parameterized.expand(snake_case , name_func=snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@require_torch_multi_gpu
@parameterized.expand(snake_case , name_func=snake_case )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :str , snake_case :Tuple ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :int ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :str , snake_case :str , snake_case :int = 10 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = True , ):
'''simple docstring'''
A_ : Any = models[model]
A_ : List[Any] = self.run_trainer(
stage=snake_case , model_name=snake_case , eval_steps=snake_case , num_train_epochs=1 , distributed=snake_case , fpaa=snake_case , )
self.do_checks(snake_case )
return output_dir
def SCREAMING_SNAKE_CASE ( self :str , snake_case :str , snake_case :str , snake_case :int = 10 , snake_case :int = 1 , snake_case :bool = True , snake_case :bool = True , ):
'''simple docstring'''
A_ : List[Any] = self.get_auto_remove_tmp_dir("./xxx" , after=snake_case )
A_ : Tuple = f"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(snake_case )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A_ : List[str] = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
A_ : List[str] = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
A_ : str = self.get_launcher(snake_case )
A_ : Dict = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(snake_case , env=self.get_env() )
return output_dir
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Optional[Any]=False ):
'''simple docstring'''
A_ : int = min(2 , get_gpu_count() ) if distributed else 1
return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 70 | 0 |
import numpy as np
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
lowerCAmelCase = int(np.ceil((x_end - xa) / h ) )
lowerCAmelCase = np.zeros((n + 1,) )
lowerCAmelCase = ya
lowerCAmelCase = xa
for k in range(snake_case__ ):
lowerCAmelCase = f(snake_case__ , y[k] )
lowerCAmelCase = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
lowerCAmelCase = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
lowerCAmelCase = f(x + h , y[k] + h * ka )
lowerCAmelCase = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
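# A cleaned-up sketch of the classic fourth-order Runge-Kutta integrator
# above, for y' = f(x, y) on [x0, x_end] with step size h:
import numpy as np

def runge_kutta4(f, y0, x0, x_end, h):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y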
| 338 |
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = name
lowerCAmelCase = value
lowerCAmelCase = weight
def __repr__( self ) ->str:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return self.value
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return self.name
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
return self.weight
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
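# A sketch of the greedy selection above with the obfuscated names restored;
# a typical key_function returns value/weight (value density):
def greedy(items, max_cost, key_function):
    items_copy = sorted(items, key=key_function, reverse=True)
    result, total_value, total_cost = [], 0.0, 0.0
    for item in items_copy:
        if total_cost + item.get_weight() <= max_cost:
            result.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return result, total_value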
| 338 | 1 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : List[str] =0
@slow
def _lowerCAmelCase ( self : Optional[Any] ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCamelCase__ : List[Any] =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCamelCase__ : Any =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Any =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : str =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : int =AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
# Check that tokenizer_type ≠ model_type
UpperCamelCase__ : str =AutoTokenizer.from_pretrained(lowercase_ , config=lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _lowerCAmelCase ( self : int ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowercase_ , '''vocab.txt''' ) )
UpperCamelCase__ : Union[str, Any] =AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='''bert''' , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowercase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowercase_ , '''merges.txt''' ) )
UpperCamelCase__ : Optional[int] =AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='''gpt2''' , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@require_tokenizers
def _lowerCAmelCase ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowercase_ , '''vocab.txt''' ) )
UpperCamelCase__ : Any =AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='''bert''' )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowercase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowercase_ , '''merges.txt''' ) )
UpperCamelCase__ : List[str] =AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(lowercase_ , lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
with pytest.raises(lowercase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def _lowerCAmelCase ( self : int ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase__ : List[str] =tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase_ , lowercase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _lowerCAmelCase ( self : List[Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
UpperCamelCase__ : Dict =tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def _lowerCAmelCase ( self : Optional[Any] ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCamelCase__ : Dict =TOKENIZER_MAPPING.values()
UpperCamelCase__ : int =[]
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase_ )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowercase_ ) , lowercase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , lowercase_ )
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Dict =AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowercase_ )
UpperCamelCase__ : Tuple ='''Hello, world. How are you?'''
UpperCamelCase__ : List[Any] =tokenizer.tokenize(lowercase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
UpperCamelCase__ : List[str] =AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowercase_ )
UpperCamelCase__ : Tuple =tokenizer.tokenize(lowercase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : Any =AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(lowercase_ ) , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Optional[int] =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCamelCase__ : Tuple =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : int =AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase_ , lowercase_ )
def _lowerCAmelCase ( self : Optional[int] ):
# Check we can load the tokenizer config of an online model.
UpperCamelCase__ : Optional[Any] =get_tokenizer_config('''bert-base-cased''' )
UpperCamelCase__ : List[str] =config.pop('''_commit_hash''' , lowercase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCamelCase__ : str =get_tokenizer_config(lowercase_ )
self.assertDictEqual(lowercase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCamelCase__ : int =AutoTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCamelCase__ : int =get_tokenizer_config(lowercase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def _lowerCAmelCase ( self : Tuple ):
try:
AutoConfig.register('''custom''' , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
UpperCamelCase__ : List[str] =CustomTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCamelCase__ : Optional[int] =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
try:
AutoConfig.register('''custom''' , lowercase_ )
# Can register in two steps
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase_ , slow_tokenizer_class=lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ : List[str] =BertTokenizerFast.from_pretrained(lowercase_ )
bert_tokenizer.save_pretrained(lowercase_ )
UpperCamelCase__ : Any =CustomTokenizerFast.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCamelCase__ : Optional[Any] =AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCamelCase__ : str =AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCamelCase__ : Union[str, Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCamelCase__ : List[str] =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase_ )
UpperCamelCase__ : List[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCamelCase__ : List[str] =AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCamelCase__ : int =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCamelCase__ : int =AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = False
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = NewTokenizer
SCREAMING_SNAKE_CASE_ = False
try:
AutoConfig.register('''custom''' , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : Dict ):
        tokenizer = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _lowerCAmelCase ( self : List[Any] ):
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            tokenizer = AutoTokenizer.from_pretrained('''bert-base''' )
    def _lowerCAmelCase ( self : Optional[int] ):
        with self.assertRaisesRegex(
            EnvironmentError , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
def _lowerCAmelCase ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
UpperCamelCase__ : Optional[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCamelCase__ : Dict =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
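# A minimal sketch of the custom-tokenizer registration flow these tests exercise,
# assuming the CustomConfig / NewTokenizer classes defined in this test module
# (the checkpoint path is illustrative, not a real repo):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer,
#                          fast_tokenizer_class=NewTokenizerFast)
#   tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint-with-custom-config")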
| 157 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
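# What the _LazyModule pattern above buys: importing the package is cheap, and the
# torch-backed modeling submodule is only imported on first attribute access.
# A minimal usage sketch:
#
#   from transformers import BigBirdPegasusConfig   # config only, no torch-heavy import
#   from transformers import BigBirdPegasusModel    # first access pulls in the modeling code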
| 157 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file, sock):
    """simple docstring"""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _a : next(f )
    # ===== invoke =====
    send_file(filename='''mytext.txt''' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
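# For reference, an assumed sketch of the server loop these mocks stand in for;
# the chunk size and exact call order are illustrative, not the verified
# implementation of file_transfer.send_file:
#
#   sock = socket.socket()
#   sock.bind((host, port)); sock.listen(5)
#   conn, addr = sock.accept()
#   conn.recv(1024)                          # wait for the client's first message
#   with open(filename, "rb") as in_file:
#       data = in_file.read(1024)
#       while data:                          # stream the file chunk by chunk
#           conn.send(data)
#           data = in_file.read(1024)
#   conn.close(); sock.shutdown(1); sock.close()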
| 131 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
    elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
    elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
    elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
    if "prime_prior" in key:
        key = key.replace('''prime_prior''' , '''encoder''' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''' , '''.''' )
    if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''' , '''.codebook''' )
    if "y_emb." in key:
        return key.replace('''y_emb.''' , '''metadata_embedding.''' )
    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
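# Quick sanity checks for replace_key (keys are illustrative, not taken from a
# real checkpoint):
#   replace_key("priors.0.y_emb.weight")  -> "priors.0.metadata_embedding.weight"
#   replace_key("prior.x_out.weight")     -> "prior.fc_proj_out.weight"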
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
            resnet_block = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
            print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle missmatched shape
        elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
            val = model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
            r = requests.get(F'''{PREFIX}{file}''' , allow_redirects=True )
            os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=True )
            open(F'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , '''wb''' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('''/''' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )['''model''']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''' , '''bias''' )] = old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''' , '''weight''' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''' , '''.model.''' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = '''vqvae''' if i == 0 else F'''priors.{3 - i}'''
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
__snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
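# Example invocation (the output directory name is illustrative):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted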
| 259 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text):
        return " ".join(text.split() )
    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact(a_gold , a_pred):
    """Compute the exact match of two strings after normalization."""
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em(predictions , references):
    """Average exact match of each prediction against its set of references, in percent."""
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
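# Worked example of the normalization pipeline above:
#   normalize_answer("The Quick, Brown Fox!")  -> "quick brown fox"
#   compute_exact("A cat.", "the CAT")         -> 1   (both normalize to "cat")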
def SARIngram(sgrams , cgrams , rgramslist , numref):
    """Keep/delete/add sub-scores of SARI for one n-gram order."""
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent , csent , rsents):
    """Sentence-level SARI: average keep/delete/add scores over 1- to 4-grams."""
    numref = len(rsents )
    s1grams = ssent.split(''' ''' )
    c1grams = csent.split(''' ''' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score, del1score, add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score, del2score, add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score, del3score, add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score, del4score, add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
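# With the docstring example at the top of this file ("About 95 species are currently
# accepted ." vs "About 95 you now get in ." against one reference), SARIsent returns
# roughly 0.2181; compute_sari below averages per-sentence scores and scales by 100,
# giving the documented 'sari' value of ~21.81.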
def normalize(sentence , lowercase = True , tokenizer = "13a" , return_str = True):
    """simple docstring"""
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources , predictions , references):
    """simple docstring"""
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def compute_sacrebleu(predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    """simple docstring"""
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit( datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute( self , sources , predictions , references ):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
        return result
| 358 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 11_03 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings( self ):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_61_03
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_03
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 10_24
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ['''This is going to be way too long.''' * 1_50, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def UpperCamelCase__ ( self ):
# fmt: off
snake_case_ = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ['''This is going to be way too long.''' * 10_00, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        test_string = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        token_ids = self._large_tokenizer(test_string ).input_ids
        self.assertListEqual(
            token_ids , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 267 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCamelCase : List[str] = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    model_type = "fnet"
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=7_6_8 , num_hidden_layers=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=5_1_2 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Optional[int]:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
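# A minimal round-trip sketch for this config class (the save path is illustrative):
#   config = FNetConfig(vocab_size=32000, hidden_size=768)
#   config.save_pretrained("./fnet-config")
#   config = FNetConfig.from_pretrained("./fnet-config")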
| 77 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig( PretrainedConfig ):
    model_type = "mgp-str"
    def __init__( self , image_size=[3_2, 1_2_8] , patch_size=4 , num_channels=3 , max_token_length=2_7 , num_character_labels=3_8 , num_bpe_labels=5_0_2_5_7 , num_wordpiece_labels=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.02 , **kwargs , ) -> Tuple:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 77 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
    def setUp( self ) -> None:
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self , **kwargs ) -> BloomTokenizerFast:
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data( self ) -> None:
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )["input_ids"]
        self.assertListEqual(TARGET_TOKENS , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , INPUT_SENTENCES )
    def test_padding( self , max_length=6 ) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
    def test_encodings_from_xnli_dataset( self ) -> None:
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli" , "all_languages" , split="test" , streaming=True )
        sample_data = next(iter(ds ) )["premise"]  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists( self ) -> None:
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 325 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer using Brian Kernighan's way.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer using the modulo operator.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
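# Worked example: 25 = 0b11001 has three set bits. Kernighan's trick clears the
# lowest set bit on each iteration, so the loop runs once per set bit:
#   0b11001 & 0b11000 = 0b11000
#   0b11000 & 0b10111 = 0b10000
#   0b10000 & 0b01111 = 0b00000   -> 3 iterations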
def benchmark() -> None:
    """Benchmark code comparing the two functions on int values of different lengths."""
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"""Benchmark when {number = }:""" )
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""" )
        timing = timeit(f"""z.get_set_bits_count_using_modulo_operator({number})""", setup=setup )
        print(f"""timeit() runs in {timing} seconds""" )
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""" )
        timing = timeit(
            f"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""", setup=setup, )
        print(f"""timeit() runs in {timing} seconds""" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 325 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any] )-> None:
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int )-> None:
    """
    Creates a state space tree and traverses each branch with DFS: at each index the
    element is either excluded (left branch) or included (right branch), terminating
    when the end of the sequence is reached.
    """
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
a__ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
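# For a small input the exclude-then-include traversal order is easy to trace,
# e.g. generate_all_subsequences([1, 2]) prints: [], [2], [1], [1, 2]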
| 161 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> Any:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[torch.Generator] = None , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        # sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image,), "This is a local test"
        return ImagePipelineOutput(images=image ), "This is a local test"
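# Hypothetical usage sketch (requires an initialized UNet2DModel and a compatible
# scheduler; the variable names are illustrative):
#   pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#   output, message = pipe(batch_size=1, num_inference_steps=50)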
| 161 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
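# Worked example of the resize rule above: with shortest_edge=18, an image with
# (h, w) = (40, 30) maps to (expected_height, expected_width) = (int(18 * 40 / 30), 18)
# = (24, 18), i.e. the shorter side becomes 18 and the aspect ratio is preserved.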
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
def _lowercase (self : Dict ):
# prepare image and target
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {"image_id": 39769, "annotations": target}
# encode them
UpperCAmelCase_ = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
UpperCAmelCase_ = image_processing(images=__a , annotations=__a , return_tensors="pt" )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __a )
UpperCAmelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __a , atol=1E-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __a ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __a )
UpperCAmelCase_ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __a , atol=1E-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __a ) )
# verify iscrowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __a ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __a ) )
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __a ) )
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __a ) )
@slow
def _lowercase (self : List[str] ):
# prepare image, target and masks_path
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
UpperCAmelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCAmelCase_ = ConditionalDetrImageProcessor(format="coco_panoptic" )
UpperCAmelCase_ = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors="pt" )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __a )
UpperCAmelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __a , atol=1E-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __a ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __a )
UpperCAmelCase_ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __a , atol=1E-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __a ) )
# verify iscrowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __a ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __a ) )
# verify masks
UpperCAmelCase_ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __a )
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __a ) )
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __a ) )
| 106 | '''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
SCREAMING_SNAKE_CASE_: int =OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
SCREAMING_SNAKE_CASE_: str =OrderedDict(
[
# Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
SCREAMING_SNAKE_CASE_: str =OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
SCREAMING_SNAKE_CASE_: Optional[Any] =OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
SCREAMING_SNAKE_CASE_: int =OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
SCREAMING_SNAKE_CASE_: str =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: str =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: List[str] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: List[Any] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: Any =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: List[str] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Any =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __A ( _BaseAutoModelClass ):
a__ : int = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_: Dict =auto_class_update(FlaxAutoModel)
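# A minimal usage sketch (editor's addition, assuming the hosted checkpoint "bert-base-cased"):
# from transformers import FlaxAutoModel
# model = FlaxAutoModel.from_pretrained("bert-base-cased")  # the mapping above dispatches to FlaxBertModel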
class __A ( _BaseAutoModelClass ):
a__ : str = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_: Dict =auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __A ( _BaseAutoModelClass ):
a__ : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_: Tuple =auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __A ( _BaseAutoModelClass ):
a__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_: Optional[Any] =auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __A ( _BaseAutoModelClass ):
a__ : List[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_: Optional[Any] =auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __A ( _BaseAutoModelClass ):
a__ : Union[str, Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: Optional[int] =auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __A ( _BaseAutoModelClass ):
a__ : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_: List[Any] =auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __A ( _BaseAutoModelClass ):
a__ : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: List[Any] =auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __A ( _BaseAutoModelClass ):
a__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_: Any =auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __A ( _BaseAutoModelClass ):
a__ : Union[str, Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_: int =auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __A ( _BaseAutoModelClass ):
a__ : int = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: Dict =auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __A ( _BaseAutoModelClass ):
a__ : Any = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_: Optional[int] =auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __A ( _BaseAutoModelClass ):
a__ : List[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_: Union[str, Any] =auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 106 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 325 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
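# e.g. the key "BertTokenizer" in SLOW_TO_FAST_CONVERTERS resolves here to transformers.BertTokenizerFast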
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
__lowercase = TOKENIZER_CLASSES
else:
__lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
__lowercase = TOKENIZER_CLASSES[tokenizer_name]
__lowercase = True
if checkpoint_name is None:
__lowercase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowercase = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
__lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowercase , __lowercase = checkpoint.split('/' )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif add_prefix:
__lowercase = checkpoint
__lowercase = dump_path
else:
__lowercase = None
__lowercase = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
if next_char == "/":
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
__lowercase = tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(SCREAMING_SNAKE_CASE )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 325 | 1 |
from collections.abc import Iterable
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] = None ) -> None:
a_ : Optional[int] = value
a_ : Node | None = None # Added in order to delete a node easier
a_ : Node | None = None
a_ : Node | None = None
def __repr__( self : Dict ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Any = None ) -> None:
a_ : str = root
def __str__( self : Tuple ) -> str:
return str(self.root )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> None:
if new_children is not None: # reset its kids
a_ : str = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__lowercase ): # If it is the right children
a_ : Optional[int] = new_children
else:
a_ : Tuple = new_children
else:
a_ : Union[str, Any] = new_children
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
return self.root is None
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ) -> None:
a_ : Tuple = Node(__lowercase ) # create a new Node
if self.empty(): # if Tree is empty
a_ : Optional[int] = new_node # set its root
else: # Tree is not empty
a_ : Any = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
a_ : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
a_ : Any = parent_node.left
else:
if parent_node.right is None:
a_ : str = new_node
break
else:
a_ : Any = parent_node.right
a_ : Tuple = parent_node
def SCREAMING_SNAKE_CASE ( self : int , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> None:
for value in values:
self.__insert(__lowercase )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Node | None:
if self.empty():
raise IndexError('Warning: Tree is empty! Please insert a node before searching.' )
else:
a_ : List[str] = self.root
# rely on short-circuit evaluation so a None node does not raise an AttributeError
while node is not None and node.value != value:
a_ : List[Any] = node.left if value < node.value else node.right
return node
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] = None ) -> Node | None:
if node is None:
if self.root is None:
return None
a_ : Union[str, Any] = self.root
if not self.empty():
while node.right is not None:
a_ : str = node.right
return node
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Any = None ) -> Node | None:
if node is None:
a_ : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
a_ : Dict = self.root
while node.left is not None:
a_ : List[str] = node.left
return node
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> None:
a_ : List[Any] = self.search(__lowercase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__lowercase , __lowercase )
elif node.left is None: # Has only right children
self.__reassign_nodes(__lowercase , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__lowercase , node.left )
else:
a_ : List[Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
a_ : Tuple = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> None:
if node:
self.inorder(__lowercase , node.left )
arr.append(node.value )
self.inorder(__lowercase , node.right )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> int:
a_ : list[int] = []
self.inorder(__lowercase , __lowercase ) # append all values to list using inorder traversal
return arr[k - 1]
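# Note: this works because an inorder traversal of a binary search tree visits values in
# ascending order, so the k-th element of the collected list is the k-th smallest value.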
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
a_ : Union[str, Any] = []
if curr_node is not None:
a_ : List[str] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
"""simple docstring"""
a_ : Optional[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
a_ : Union[str, Any] = BinarySearchTree()
for i in testlist:
t.insert(__A )
# Prints all the elements of the list in order traversal
print(__A )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(__A )
print(__A )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 367 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Any = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 120 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(__a )
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
requires_backends(self , """vision""")
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
def snake_case_ ( self , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
return super().__call__(*lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = load_image(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.IntTensor([[image.height, image.width]])
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors="""pt""")
if self.tokenizer is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""")
__SCREAMING_SNAKE_CASE = target_size
return inputs
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = model_inputs.pop("""target_size""")
__SCREAMING_SNAKE_CASE = self.model(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = outputs.__class__({"""target_size""": target_size, **outputs})
if self.tokenizer is not None:
__SCREAMING_SNAKE_CASE = model_inputs["""bbox"""]
return model_outputs
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0.9):
__SCREAMING_SNAKE_CASE = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = target_size[0].tolist()
def unnormalize(lowerCAmelCase__):
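# LayoutLM-style models emit boxes normalized to a 0-1000 grid; scale them back to absolute pixel coordinates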
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
]))
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = model_outputs["""logits"""].squeeze(0).softmax(dim=-1).max(dim=-1)
__SCREAMING_SNAKE_CASE = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__SCREAMING_SNAKE_CASE = [unnormalize(lowerCAmelCase__) for bbox in model_outputs["""bbox"""].squeeze(0)]
__SCREAMING_SNAKE_CASE = ["""score""", """label""", """box"""]
__SCREAMING_SNAKE_CASE = [dict(zip(lowerCAmelCase__ , lowerCAmelCase__)) for vals in zip(scores.tolist() , lowerCAmelCase__ , lowerCAmelCase__) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = raw_annotations[0]
__SCREAMING_SNAKE_CASE = raw_annotation["""scores"""]
__SCREAMING_SNAKE_CASE = raw_annotation["""labels"""]
__SCREAMING_SNAKE_CASE = raw_annotation["""boxes"""]
__SCREAMING_SNAKE_CASE = scores.tolist()
__SCREAMING_SNAKE_CASE = [self.model.config.idalabel[label.item()] for label in labels]
__SCREAMING_SNAKE_CASE = [self._get_bounding_box(lowerCAmelCase__) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__SCREAMING_SNAKE_CASE = ["""score""", """label""", """box"""]
__SCREAMING_SNAKE_CASE = [
dict(zip(lowerCAmelCase__ , lowerCAmelCase__))
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""])
]
return annotation
def snake_case_ ( self , lowerCAmelCase__):
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""")
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = box.int().tolist()
__SCREAMING_SNAKE_CASE = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
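# A minimal usage sketch (editor's addition, assuming the hosted checkpoint "facebook/detr-resnet-50"):
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]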
| 100 |
'''Detect whether a singly linked list contains a loop.'''
from __future__ import annotations
from typing import Any
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
pass
class snake_case :
"""simple docstring"""
def __init__( self : List[Any] , __A : Any ):
__UpperCamelCase = data
__UpperCamelCase = None
def __iter__( self : Optional[Any] ):
__UpperCamelCase = self
__UpperCamelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(__A )
yield node.data
__UpperCamelCase = node.next_node
@property
def _lowerCamelCase ( self : List[str] ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
a__ : Dict =Node(1)
a__ : Optional[int] =Node(2)
a__ : List[str] =Node(3)
a__ : Optional[int] =Node(4)
print(root_node.has_loop) # False
a__ : str =root_node.next_node
print(root_node.has_loop) # True
a__ : Optional[int] =Node(5)
a__ : List[Any] =Node(6)
a__ : int =Node(5)
a__ : Tuple =Node(6)
print(root_node.has_loop) # False
a__ : str =Node(1)
print(root_node.has_loop) # False
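# A minimal alternative sketch (editor's addition, not part of the original module): Floyd's
# tortoise-and-hare detects a cycle in O(n) time and O(1) extra space, whereas the visited-list
# approach in has_loop above costs O(n^2) time because of the linear membership check.
def has_loop_floyd(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one step
        fast = fast.next_node.next_node  # advances two steps
        if slow is fast:  # the two pointers can only meet inside a cycle
            return True
    return False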
| 53 | 0 |
'''Import structure for the M2M100 model.'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 283 |
'''fsspec filesystems for reading compressed files as single-file archives.'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _snake_case ( a__ ):
lowerCAmelCase :Optional[int] = ''''''
lowerCAmelCase :str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCAmelCase :str = None # compression type in fsspec. ex: "gzip"
lowerCAmelCase :str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , _lowerCamelCase = "" , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase):
super().__init__(self , **_lowerCamelCase)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCAmelCase__ : Optional[Any] = fsspec.open(
_lowerCamelCase , mode="""rb""" , protocol=_lowerCamelCase , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCAmelCase__ : List[Any] = os.path.basename(self.file.path.split("""::""")[0])
UpperCAmelCase__ : Dict = (
self.compressed_name[: self.compressed_name.rindex(""".""")]
if """.""" in self.compressed_name
else self.compressed_name
)
UpperCAmelCase__ : Tuple = None
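# e.g. a path ending in "file.txt.gz" yields compressed_name "file.txt.gz" and uncompressed_name "file.txt"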
@classmethod
def snake_case__ ( cls , _lowerCamelCase):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_lowerCamelCase).lstrip("""/""")
def snake_case__ ( self):
if self.dir_cache is None:
UpperCAmelCase__ : Optional[Any] = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
UpperCAmelCase__ : Union[str, Any] = {f["""name"""]: f}
def snake_case__ ( self , _lowerCamelCase):
return self.file.open().read()
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = "rb" , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ):
UpperCAmelCase__ : List[str] = self._strip_protocol(_lowerCamelCase)
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
return self.file.open()
class _snake_case ( a__ ):
lowerCAmelCase :Dict = '''bz2'''
lowerCAmelCase :List[str] = '''bz2'''
lowerCAmelCase :Dict = '''.bz2'''
class _snake_case ( a__ ):
lowerCAmelCase :int = '''gzip'''
lowerCAmelCase :Tuple = '''gzip'''
lowerCAmelCase :str = '''.gz'''
class _snake_case ( a__ ):
lowerCAmelCase :List[str] = '''lz4'''
lowerCAmelCase :Any = '''lz4'''
lowerCAmelCase :int = '''.lz4'''
class _snake_case ( a__ ):
lowerCAmelCase :Union[str, Any] = '''xz'''
lowerCAmelCase :int = '''xz'''
lowerCAmelCase :List[Any] = '''.xz'''
class _snake_case ( a__ ):
lowerCAmelCase :Tuple = '''zstd'''
lowerCAmelCase :List[str] = '''zstd'''
lowerCAmelCase :Union[str, Any] = '''.zst'''
def __init__( self , _lowerCamelCase , _lowerCamelCase = "rb" , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = DEFAULT_BLOCK_SIZE , **_lowerCamelCase , ):
super().__init__(
fo=_lowerCamelCase , mode=_lowerCamelCase , target_protocol=_lowerCamelCase , target_options=_lowerCamelCase , block_size=_lowerCamelCase , **_lowerCamelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCAmelCase__ : Dict = self.file.__enter__
class _snake_case :
def __init__( self , _lowerCamelCase):
UpperCAmelCase__ : Optional[int] = file_
def __enter__( self):
self._file.__enter__()
return self
def __exit__( self , *_lowerCamelCase , **_lowerCamelCase):
self._file.__exit__(*_lowerCamelCase , **_lowerCamelCase)
def __iter__( self):
return iter(self._file)
def snake_case__ ( self):
return next(self._file)
def __getattr__( self , _lowerCamelCase):
return getattr(self._file , _lowerCamelCase)
def fixed_enter(*_lowerCamelCase , **_lowerCamelCase):
return WrappedFile(_enter(*_lowerCamelCase , **_lowerCamelCase))
UpperCAmelCase__ : List[Any] = fixed_enter | 283 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_SCREAMING_SNAKE_CASE : Optional[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_SCREAMING_SNAKE_CASE : str = logging.getLogger()
def UpperCAmelCase__ ():
"""simple docstring"""
snake_case = argparse.ArgumentParser()
parser.add_argument('''-f''' )
snake_case = parser.parse_args()
return args.f
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_="eval" ):
"""simple docstring"""
snake_case = os.path.join(UpperCamelCase_ ,F'''{split}_results.json''' )
if os.path.exists(UpperCamelCase_ ):
with open(UpperCamelCase_ ,'''r''' ) as f:
return json.load(UpperCamelCase_ )
raise ValueError(F'''can\'t find {path}''' )
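# the helper above expects each example script to have written {output_dir}/{split}_results.json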
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A__ ( snake_case__ ):
"""simple docstring"""
def a_ ( self ):
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__snake_case , '''argv''' , __snake_case ):
run_flax_glue.main()
snake_case = get_results(__snake_case )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def a_ ( self ):
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__snake_case , '''argv''' , __snake_case ):
run_clm_flax.main()
snake_case = get_results(__snake_case )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def a_ ( self ):
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(__snake_case , '''argv''' , __snake_case ):
run_summarization_flax.main()
snake_case = get_results(__snake_case , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def a_ ( self ):
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(__snake_case , '''argv''' , __snake_case ):
run_mlm_flax.main()
snake_case = get_results(__snake_case )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def a_ ( self ):
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__snake_case , '''argv''' , __snake_case ):
run_ta_mlm_flax.main()
snake_case = get_results(__snake_case )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def a_ ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case = 7 if get_gpu_count() > 1 else 2
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(__snake_case , '''argv''' , __snake_case ):
run_flax_ner.main()
snake_case = get_results(__snake_case )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def a_ ( self ):
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(__snake_case , '''argv''' , __snake_case ):
run_qa.main()
snake_case = get_results(__snake_case )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 127 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = BlenderbotSmallTokenizer
__magic_name__ = False
def a_ ( self ):
super().setUp()
snake_case = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
snake_case = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
snake_case = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
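# a tiny BPE setup: the vocab maps tokens to ids and each merges line names a symbol pair to merge, with "</w>" marking end-of-word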
snake_case = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def a_ ( self , **__snake_case ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def a_ ( self , __snake_case ):
snake_case = '''adapt act apte'''
snake_case = '''adapt act apte'''
return input_text, output_text
def a_ ( self ):
snake_case = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case = '''adapt act apte'''
snake_case = ['''adapt''', '''act''', '''ap@@''', '''te''']
snake_case = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def a_ ( self ):
snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_3_8_4]
snake_case = '''I am a small frog.'''
snake_case = tok([src_text] , padding=__snake_case , truncation=__snake_case )['''input_ids''']
snake_case = tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ ( self ):
snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
snake_case = '''I am a small frog .'''
snake_case = '''.'''
snake_case = tok(__snake_case )['''input_ids''']
snake_case = tok(__snake_case )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 127 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( __a , __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Optional[Any] = StableDiffusionDiffEditPipeline
__lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__lowercase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__lowercase : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : Optional[Any] = frozenset([] )
def snake_case_ ( self):
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase__ , set_alpha_to_zero=lowerCAmelCase__ , )
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
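# DiffEdit runs in three stages: generate_mask() contrasts noise predictions for the source and
# target prompts to infer an edit mask, invert() runs the inverse scheduler to recover editable
# latents, and __call__() denoises those latents toward the target prompt inside the mask.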
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0):
__SCREAMING_SNAKE_CASE = floats_tensor((1, 1_6, 1_6) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
if str(lowerCAmelCase__).startswith("""mps"""):
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0):
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCAmelCase__)).convert("""RGB""")
if str(lowerCAmelCase__).startswith("""mps"""):
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0):
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCAmelCase__)).convert("""RGB""")
if str(lowerCAmelCase__).startswith("""mps"""):
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def test_save_load_optional_components(self):
# torch_device is assumed to be imported from diffusers' test utilities in the file header.
if not hasattr(self.pipeline_class , "_optional_components"):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(pipe , optional_component , None)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
inputs = self.get_dummy_inputs(torch_device)
output = pipe(**inputs)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(tmpdir)
pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
pipe_loaded.to(torch_device)
pipe_loaded.set_progress_bar_config(disable=None)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(pipe_loaded , optional_component) is None , f"`{optional_component}` did not stay set to None after loading." , )
inputs = self.get_dummy_inputs(torch_device)
output_loaded = pipe_loaded(**inputs)[0]
max_diff = np.abs(output - output_loaded).max()
self.assertLess(max_diff , 1E-4)
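# With the tiny randomly initialised components from get_dummy_components, the
# checked 3x3 corner of the generated mask is expected to be exactly zero at the
# 16x16 latent resolution.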
def test_mask(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
mask_inputs = self.get_dummy_mask_inputs(device)
mask = pipe.generate_mask(**mask_inputs)
mask_slice = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
expected_slice = np.array([0] * 9)
max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(max_diff , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def test_inversion(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inversion_inputs(device)
image = pipe.invert(**inputs).images
image_slice = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
expected_slice = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(max_diff , 1E-3)
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def test_inversion_dpm(self):
device = "cpu"
components = self.get_dummy_components()
scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inversion_inputs(device)
image = pipe.invert(**inputs).images
image_slice = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
expected_slice = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(max_diff , 1E-3)
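# The suite below runs against the real stabilityai/stable-diffusion-2-1 weights
# and therefore requires a GPU; it is gated behind @slow.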
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def setUpClass(cls):
raw_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
raw_image = raw_image.convert("RGB").resize((768, 768))
cls.raw_image = raw_image
def test_stable_diffusion_diffedit_full(self):
generator = torch.manual_seed(0)
pipe = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=None , torch_dtype=torch.float16)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
source_prompt = "a bowl of fruit"
target_prompt = "a bowl of pears"
mask_image = pipe.generate_mask(
image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
inv_latents = pipe.invert(
prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator).latents
image = pipe(
prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
expected_image = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png").resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
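# Same end-to-end DiffEdit flow as above, but with the DDIM scheduler pair swapped
# for DPMSolverMultistepScheduler and its inverse, run for 25 inference steps.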
def test_stable_diffusion_diffedit_dpm(self):
generator = torch.manual_seed(0)
pipe = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=None , torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
source_prompt = "a bowl of fruit"
target_prompt = "a bowl of pears"
mask_image = pipe.generate_mask(
image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
inv_latents = pipe.invert(
prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator , num_inference_steps=25 , ).latents
image = pipe(
prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
expected_image = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png").resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
| 255 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = XGLMTokenizer
rust_tokenizer_class = XGLMTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id(self):
token = "<pad>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(vocab_keys) , 1008)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def test_full_tokenizer(self):
tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
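# SentencePiece piece ids are shifted by tokenizer.fairseq_offset to leave room
# for the fairseq-style special tokens placed at the start of the vocabulary.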
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def big_tokenizer(self):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
def test_picklable_without_disk(self):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SAMPLE_VOCAB , f.name)
tokenizer = XGLMTokenizer(f.name , keep_accents=True)
pickled_tokenizer = pickle.dumps(tokenizer)
pickle.loads(pickled_tokenizer)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens , rust_tokens)
ids = tokenizer.encode(sequence , add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
self.assertListEqual(ids , rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids , rust_ids)
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [2, 31227, 4447, 35]
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
def test_tokenization_base_hard_symbols(self):
symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
__SCREAMING_SNAKE_CASE = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def test_tokenizer_integration(self):
# fmt: off
expected_encoding = {
"""input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding , model_name="facebook/xglm-564M" , padding=False , )
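# Minimal usage sketch mirroring the slow tests above (downloads the real
# checkpoint from the Hub):
#
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   assert tok.encode("Hello World!") == [2, 31227, 4447, 35]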
| 255 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # threshold the logits into a binary mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
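# Minimal usage sketch (the image path is hypothetical; PipelineTool instances
# are invoked through __call__, which chains encode, forward and decode):
#
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("street.png"), label="car")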
| 1 |
"""Binomial coefficient C(n, r) via a space-optimised Pascal's triangle."""


def binomial_coefficient(n, r):
    c = [0 for _ in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the current row from the previous row, filling right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
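# Worked example: the call above computes C(10, 5) = 252 in O(n * r) time with
# only O(r) extra space.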
| 70 | 0 |
"""k-nearest-neighbours classification on the Iris dataset."""
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
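# Worked example: the query [4.4, 3.1, 1.3, 1.4] has setosa-sized petals, so its
# five nearest training neighbours should vote "setosa".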
| 356 | '''simple docstring'''
import os
import numpy
import onnx
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple:
lowercase_ : Tuple = a.name
lowercase_ : Tuple = b.name
lowercase_ : Any = """"""
lowercase_ : List[Any] = """"""
lowercase_ : List[Any] = a == b
lowercase_ : Union[str, Any] = name_a
lowercase_ : Optional[Any] = name_b
return res
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]:
lowercase_ : int = list(model.graph.initializer )
lowercase_ : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase_ : Optional[Any] = inits[i].name
lowercase_ : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]:
lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ )
lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ )
lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase_ : List[Any] = list(model.graph.initializer )
lowercase_ : int = set()
lowercase_ : int = {}
lowercase_ : str = []
lowercase_ : int = 0
for i in range(len(UpperCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase__ )
dup_set.add(UpperCAmelCase__ )
lowercase_ : Dict = inits[j].data_type
lowercase_ : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCAmelCase__ )
total_reduced_size += mem_size
lowercase_ : int = inits[i].name
lowercase_ : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowercase_ : Tuple = sorted(UpperCAmelCase__ )
_remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = """optimized_""" + model_file_name
lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
onnx.save(UpperCAmelCase__ , UpperCAmelCase__ )
return new_model
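# Usage sketch (the .onnx path is hypothetical):
#
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   # writes exported/optimized_model.onnx with duplicate weight tensors merged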
| 21 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_snake_case = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
def __init__( self: int , *__lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
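# SegformerFeatureExtractor is kept only as a deprecated alias; new code should
# instantiate SegformerImageProcessor directly.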
| 157 | from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
return inputs_dict
class TFMobileBertModelTester:  # note: upstream this tester is nested inside TFMobileBertModelTest (see the reference below)
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: str=13 , __lowerCamelCase: Any=7 , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: str=32 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: Dict=2 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[int]=37 , __lowerCamelCase: Optional[int]="gelu" , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: int=5_12 , __lowerCamelCase: Optional[int]=16 , __lowerCamelCase: Dict=2 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: List[Any]=4 , __lowerCamelCase: Union[str, Any]=None , ) -> Optional[int]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : str = use_input_mask
__UpperCAmelCase : Optional[int] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Optional[Any] = type_sequence_label_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : List[str] = embedding_size
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
choice_labels = ids_tensor([self.batch_size] , self.num_choices)
config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = TFMobileBertModel(config=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = [input_ids, input_mask]
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict ) -> Optional[int]:
__UpperCAmelCase : List[str] = TFMobileBertForMaskedLM(config=__lowerCamelCase )
__UpperCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str , __lowerCamelCase: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : Optional[int] = TFMobileBertForNextSentencePrediction(config=__lowerCamelCase )
__UpperCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__lowerCamelCase )
__UpperCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : Tuple = TFMobileBertForSequenceClassification(config=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.num_choices
__UpperCAmelCase : Tuple = TFMobileBertForMultipleChoice(config=__lowerCamelCase )
__UpperCAmelCase : Dict = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : str = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Any = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: Optional[int] ) -> Dict:
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Optional[int] = TFMobileBertForTokenClassification(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: int ) -> Tuple:
__UpperCAmelCase : Tuple = TFMobileBertForQuestionAnswering(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
__UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: int ) -> int:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCamelCase )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> str:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
__UpperCAmelCase : Dict = TFMobileBertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
__UpperCAmelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase : str = model(__lowerCamelCase )[0]
__UpperCAmelCase : Any = [1, 6, 3_05_22]
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : str = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 )
| 157 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    r"""
    Wrapper that runs several `ControlNetModel` instances side by side and sums their residuals.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels=None, timestep_cond=None, attention_mask=None, cross_attention_kwargs=None, guess_mode=False, return_dict=True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
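# Usage sketch (the checkpoint ids are hypothetical):
#
#   nets = MultiControlNetModel([ControlNetModel.from_pretrained(p) for p in ("ctrl-canny", "ctrl-depth")])
#
# In forward, per-net conditioning images and scales are zipped with self.nets and
# the residuals are summed, so each scale weights that ControlNet's contribution.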
| 353 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
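# The mean/std above are the standard ImageNet channel statistics, matching the
# normalization DetrImageProcessor applies by default.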
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 215 | 0 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest( BertTokenizationTest ):
tokenizer_class = DistilBertTokenizer
rust_tokenizer_class = DistilBertTokenizerFast
test_rust_tokenizer = True
@slow
def test_sequence_builders(self):
a : Dict = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
a : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
a : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
a : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
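# DistilBERT reuses BERT's special-token layout: [CLS] ids [SEP] for a single
# sequence and [CLS] a [SEP] b [SEP] for a pair, which is exactly what the two
# asserts above verify.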
| 105 |
"""Rod cutting: naive recursion, memoised top-down DP, and bottom-up DP."""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustively try every length for the first cut and recurse on the rest."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
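# Complexity: the naive recursion is exponential, while both the memoised
# top-down and the bottom-up variants run in O(n^2) time with O(n) extra space.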
| 267 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def snake_case (__lowercase , __lowercase=False ) -> str:
'''simple docstring'''
_snake_case : int = OmegaConf.load(__lowercase )
if display:
print(yaml.dump(OmegaConf.to_container(__lowercase ) ) )
return config
def snake_case (__lowercase , __lowercase=None , __lowercase=None ) -> Tuple:
'''simple docstring'''
if conf_path is None:
_snake_case : Optional[Any] = "./model_checkpoints/vqgan_only.yaml"
_snake_case : List[str] = load_config(__lowercase , display=__lowercase )
_snake_case : List[Any] = VQModel(**config.model.params )
if ckpt_path is None:
_snake_case : Dict = "./model_checkpoints/vqgan_only.pt"
_snake_case : Union[str, Any] = torch.load(__lowercase , map_location=__lowercase )
if ".ckpt" in ckpt_path:
_snake_case : List[str] = sd["state_dict"]
model.load_state_dict(__lowercase , strict=__lowercase )
model.to(__lowercase )
del sd
return model
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : Any = model.encode(__lowercase )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
_snake_case : str = model.decode(__lowercase )
return xrec
def snake_case (__lowercase , __lowercase=False ) -> Optional[Any]:
'''simple docstring'''
_snake_case ,_snake_case : Dict = string.rsplit("." , 1 )
if reload:
_snake_case : Optional[int] = importlib.import_module(__lowercase )
importlib.reload(__lowercase )
return getattr(importlib.import_module(__lowercase , package=__lowercase ) , cls )
def snake_case (__lowercase ) -> Dict:
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def snake_case (__lowercase , __lowercase , __lowercase=True , __lowercase=True ) -> Optional[int]:
'''simple docstring'''
_snake_case : Optional[int] = instantiate_from_config(__lowercase )
if sd is not None:
model.load_state_dict(__lowercase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
'''simple docstring'''
if ckpt:
_snake_case : str = torch.load(__lowercase , map_location="cpu" )
_snake_case : int = pl_sd["global_step"]
print(F"""loaded model from global step {global_step}.""" )
else:
_snake_case : Optional[int] = {"state_dict": None}
_snake_case : List[Any] = None
_snake_case : List[str] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=__lowercase , eval_mode=__lowercase )["model"]
return model, global_step | 284 | import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'M-CLIP'
def __init__( self , lowercase_=1_024 , lowercase_=768 , **lowercase_ ):
_snake_case : str = transformerDimSize
_snake_case : Union[str, Any] = imageDimSize
super().__init__(**lowercase_ )
class lowercase_ ( __snake_case ):
_lowerCamelCase = MCLIPConfig
def __init__( self , lowercase_ , *lowercase_ , **lowercase_ ):
super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
_snake_case : List[Any] = XLMRobertaModel(lowercase_ )
_snake_case : int = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Tuple = self.transformer(input_ids=lowercase_ , attention_mask=lowercase_ )[0]
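        # masked mean pooling: zero out embeddings at padded positions, then divide by the number of real tokens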
_snake_case : Tuple = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowercase_ ), embs | 284 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : Any = BloomTokenizerFast
lowerCAmelCase__ : Union[str, Any] = BloomTokenizerFast
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Optional[Any] = False
lowerCAmelCase__ : Dict = "tokenizer_file"
lowerCAmelCase__ : str = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__lowercase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : List[Any] , **_UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_rust_tokenizer()
__lowercase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
__lowercase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__lowercase = tokenizer.batch_encode_plus(_UpperCAmelCase )['input_ids']
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : List[str]=6 ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowercase = 'This is a simple input'
__lowercase = ['This is a simple input 1', 'This is a simple input 2']
__lowercase = ('This is a simple input', 'This is a pair')
__lowercase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
__lowercase = None # Hotfixing padding = None
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_rust_tokenizer()
__lowercase = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_UpperCAmelCase )
__lowercase = next(iter(_UpperCAmelCase ) )['premise'] # pick up one data
__lowercase = list(sample_data.values() )
__lowercase = list(map(tokenizer.encode , _UpperCAmelCase ) )
__lowercase = [tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 325 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 325 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCAmelCase : Optional[Any] = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : list = None ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
__SCREAMING_SNAKE_CASE = os.path.abspath("""examples""" )
for item in os.listdir(__SCREAMING_SNAKE_CASE ):
if item not in EXCLUDE_EXAMPLES:
__SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ) and ".py" in item_path:
with self.subTest(
tested_script=__SCREAMING_SNAKE_CASE , feature_script=__SCREAMING_SNAKE_CASE , tested_section="""main()""" if parser_only else """training_function()""" , ):
__SCREAMING_SNAKE_CASE = compare_against_test(
os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """\n""".join(__SCREAMING_SNAKE_CASE )
if special_strings is not None:
for string in special_strings:
__SCREAMING_SNAKE_CASE = diff.replace(__SCREAMING_SNAKE_CASE , """""" )
self.assertEqual(__SCREAMING_SNAKE_CASE , """""" )
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , __SCREAMING_SNAKE_CASE )
self.one_complete_example("""complete_nlp_example.py""" , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
__SCREAMING_SNAKE_CASE = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.one_complete_example("""complete_cv_example.py""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = False
@classmethod
def UpperCAmelCase__ ( cls : List[str] ) -> str:
"""simple docstring"""
super().setUpClass()
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
__SCREAMING_SNAKE_CASE = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def UpperCAmelCase__ ( cls : Any ) -> Dict:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=__SCREAMING_SNAKE_CASE )
self.assertNotIn("""epoch 0:""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""epoch 1:""" , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=__SCREAMING_SNAKE_CASE )
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE = torch.cuda.device_count()
else:
__SCREAMING_SNAKE_CASE = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""epoch 1:""" , __SCREAMING_SNAKE_CASE )
else:
self.assertIn("""epoch 0:""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""epoch 1:""" , __SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = re.findall("""({.+})""" , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [r for r in results if """accuracy""" in r][-1]
__SCREAMING_SNAKE_CASE = ast.literal_eval(__SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """tracking""" ) ) )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 365 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase__ = 10000
lowerCAmelCase__ = None
lowerCAmelCase__ = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase__ = ParquetConfig
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) )
break
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' )
raise
| 331 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : Any = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
lowerCAmelCase__ : Any = Image.open(requests.get(A_ , stream=A_ ).raw ).convert('''RGB''' )
return image
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : List[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
lowerCAmelCase__ : Optional[Any] = dct.pop(A_ )
lowerCAmelCase__ : int = val
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCAmelCase__ : Dict = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
lowerCAmelCase__ : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
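        # the checkpoint stores no bias for the key projection, so zeros are spliced in between the q and v biases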
lowerCAmelCase__ : Dict = torch.cat((q_bias, torch.zeros_like(A_ , requires_grad=A_ ), v_bias) )
lowerCAmelCase__ : List[Any] = qkv_bias
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : List[Any] = 3_64 if '''coco''' in model_name else 2_24
lowerCAmelCase__ : Any = BlipaVisionConfig(image_size=A_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowerCAmelCase__ : Dict = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=A_ ).to_dict()
elif "opt-6.7b" in model_name:
lowerCAmelCase__ : Optional[int] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=A_ ).to_dict()
elif "t5-xl" in model_name:
lowerCAmelCase__ : Union[str, Any] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCAmelCase__ : Optional[Any] = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
lowerCAmelCase__ : Any = BlipaConfig(vision_config=A_ , text_config=A_ )
return config, image_size
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( A_ , A_=None , A_=False ):
lowerCAmelCase__ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
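    # the original checkpoints use the id of "\n" as the end-of-sequence id for the OPT variants, so it is read off the tokenizer here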
lowerCAmelCase__ : Optional[Any] = tokenizer('''\n''' , add_special_tokens=A_ ).input_ids[0]
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = get_blipa_config(A_ , eos_token_id=A_ )
lowerCAmelCase__ : Optional[int] = BlipaForConditionalGeneration(A_ ).eval()
lowerCAmelCase__ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
lowerCAmelCase__ : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu'''
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : str = load_model_and_preprocess(
name=A_ , model_type=A_ , is_eval=A_ , device=A_ )
original_model.eval()
print('''Done!''' )
# update state dict keys
lowerCAmelCase__ : Dict = original_model.state_dict()
lowerCAmelCase__ : Optional[int] = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_ , A_ , A_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase__ : Optional[int] = state_dict.pop(A_ )
if key.startswith('''Qformer.bert''' ):
lowerCAmelCase__ : Optional[Any] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
lowerCAmelCase__ : Optional[int] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
lowerCAmelCase__ : List[str] = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
lowerCAmelCase__ : str = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
lowerCAmelCase__ : Tuple = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
lowerCAmelCase__ : Union[str, Any] = key.replace('''t5''' , '''language''' )
lowerCAmelCase__ : str = val
# read in qv biases
read_in_q_v_bias(A_ , A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = hf_model.load_state_dict(A_ , strict=A_ )
assert len(A_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCAmelCase__ : int = load_demo_image()
lowerCAmelCase__ : List[Any] = vis_processors['''eval'''](A_ ).unsqueeze(0 ).to(A_ )
lowerCAmelCase__ : List[str] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(A_ )
# create processor
lowerCAmelCase__ : int = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=A_ , image_std=A_ )
lowerCAmelCase__ : int = BlipaProcessor(image_processor=A_ , tokenizer=A_ )
lowerCAmelCase__ : List[str] = processor(images=A_ , return_tensors='''pt''' ).pixel_values.to(A_ )
# make sure processor creates exact same pixel values
assert torch.allclose(A_ , A_ )
original_model.to(A_ )
hf_model.to(A_ )
with torch.no_grad():
if "opt" in model_name:
lowerCAmelCase__ : str = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
lowerCAmelCase__ : Union[str, Any] = hf_model(A_ , A_ ).logits
else:
lowerCAmelCase__ : Optional[Any] = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
lowerCAmelCase__ : Union[str, Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
lowerCAmelCase__ : List[str] = hf_model(A_ , A_ , labels=A_ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=A_ )
assert torch.allclose(logits[0, :3, :3] , A_ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=A_ )
else:
# cast to same type
lowerCAmelCase__ : Dict = logits.dtype
assert torch.allclose(original_logits.to(A_ ) , A_ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
lowerCAmelCase__ : Union[str, Any] = ''''''
lowerCAmelCase__ : Optional[int] = tokenizer(A_ , return_tensors='''pt''' ).input_ids.to(A_ )
lowerCAmelCase__ : List[str] = original_model.generate({'''image''': original_pixel_values} )
lowerCAmelCase__ : Union[str, Any] = hf_model.generate(
A_ , A_ , do_sample=A_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , A_ )
lowerCAmelCase__ : Tuple = input_ids.shape[1]
lowerCAmelCase__ : List[str] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=A_ )
lowerCAmelCase__ : Optional[Any] = [text.strip() for text in output_text]
print('''HF generation:''' , A_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
__UpperCamelCase : List[Any] = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 106 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__UpperCamelCase : int = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
    # remove any stale checkpoint files in the output directory, then save the model
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(A_ , '''config.json''' ) ):
os.remove(os.path.join(A_ , '''config.json''' ) )
if os.path.exists(os.path.join(A_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(A_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(A_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def __SCREAMING_SNAKE_CASE ( A_ , A_=False ):
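    # entropy of the attention distribution: -sum(p * log p) over the last dimension (p is squared first when unlogit=True)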
lowerCAmelCase__ : Optional[Any] = 2
if unlogit:
lowerCAmelCase__ : Union[str, Any] = torch.pow(A_ , A_ )
lowerCAmelCase__ : Optional[Any] = p * torch.log(A_ )
lowerCAmelCase__ : List[Any] = 0
return -plogp.sum(dim=-1 )
def __SCREAMING_SNAKE_CASE ( A_ ):
logger.info('''lv, h >\t''' + '''\t'''.join(f'{x + 1}' for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
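    # run the model over the dataset while accumulating, per attention head, the attention entropy and a gradient-based importance score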
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
lowerCAmelCase__ : Dict = torch.zeros(A_ , A_ ).to(args.device )
lowerCAmelCase__ : int = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
lowerCAmelCase__ : Union[str, Any] = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : Optional[int] = 0.0
lowerCAmelCase__ : Optional[int] = 0.0
for step, inputs in enumerate(tqdm(A_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
lowerCAmelCase__ : Any = tuple(t.to(args.device ) for t in inputs )
((lowerCAmelCase__) ,) : List[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowerCAmelCase__ : Any = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
lowerCAmelCase__ : Dict = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowerCAmelCase__ : Any = 2
lowerCAmelCase__ : Dict = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
if not args.dont_normalize_global_importance:
lowerCAmelCase__ : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(A_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(A_ )
logger.info('''Head ranked by importance scores''' )
lowerCAmelCase__ : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowerCAmelCase__ : Optional[int] = torch.arange(
head_importance.numel() , device=args.device )
lowerCAmelCase__ : int = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
lowerCAmelCase__ : Union[str, Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , A_ , original_score * args.masking_threshold )
lowerCAmelCase__ : Union[str, Any] = torch.ones_like(A_ )
lowerCAmelCase__ : List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowerCAmelCase__ : int = original_score
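    # iteratively mask the num_to_mask least important heads until the score drops below the masking threshold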
while current_score >= original_score * args.masking_threshold:
lowerCAmelCase__ : Union[str, Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowerCAmelCase__ : str = float('''Inf''' )
lowerCAmelCase__ : List[Any] = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
lowerCAmelCase__ : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
lowerCAmelCase__ : int = new_head_mask.view(-1 )
lowerCAmelCase__ : Optional[int] = 0.0
lowerCAmelCase__ : Union[str, Any] = new_head_mask.view_as(A_ )
lowerCAmelCase__ : Tuple = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
lowerCAmelCase__ : Tuple = 1 / loss
logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percent)''' , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('''Final head mask''' )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Optional[Any] = datetime.now()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
lowerCAmelCase__ : Optional[Any] = 1 / loss
lowerCAmelCase__ : Tuple = datetime.now() - before_time
lowerCAmelCase__ : int = sum(p.numel() for p in model.parameters() )
lowerCAmelCase__ : List[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
lowerCAmelCase__ : int = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
lowerCAmelCase__ : List[Any] = sum(p.numel() for p in model.parameters() )
lowerCAmelCase__ : Any = datetime.now()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : int = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
lowerCAmelCase__ : int = 1 / loss
lowerCAmelCase__ : Dict = datetime.now() - before_time
logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)''' , A_ , A_ , pruned_num_params / original_num_params * 1_00 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , A_ , A_ )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percent''' , original_time / new_time * 1_00 )
save_model(A_ , args.output_dir )
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=A_ , type=A_ , required=A_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=A_ , type=A_ , required=A_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=A_ , type=A_ , required=A_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=A_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=A_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=A_ , type=A_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=A_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=A_ , help='''masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=A_ , help='''Fraction of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=A_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=A_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=A_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=A_ , default=42 )
parser.add_argument('''--local_rank''' , type=A_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=A_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=A_ , default='''''' , help='''Can be used for distant debugging.''' )
lowerCAmelCase__ : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCAmelCase__ : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowerCAmelCase__ : str = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCAmelCase__ : Dict = torch.device('''cuda''' , args.local_rank )
lowerCAmelCase__ : Union[str, Any] = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCAmelCase__ : List[str] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCAmelCase__ : Dict = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
lowerCAmelCase__ : List[Any] = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , A_ )
# Prepare dataset
lowerCAmelCase__ : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowerCAmelCase__ : Union[str, Any] = (torch.from_numpy(A_ ),)
lowerCAmelCase__ : Tuple = TensorDataset(*A_ )
lowerCAmelCase__ : Optional[int] = RandomSampler(A_ )
lowerCAmelCase__ : Dict = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCAmelCase__ : Tuple = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
| 106 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class UpperCamelCase ( ctypes.Structure ):
"""simple docstring"""
# _fields is a specific attr expected by ctypes
SCREAMING_SNAKE_CASE_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def __SCREAMING_SNAKE_CASE ( ):
if os.name == "nt":
_lowercase : Dict = CursorInfo()
_lowercase : Union[str, Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
_lowercase : Any = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def __SCREAMING_SNAKE_CASE ( ):
if os.name == "nt":
_lowercase : Optional[int] = CursorInfo()
_lowercase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
_lowercase : Any = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def __SCREAMING_SNAKE_CASE ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
| 336 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
| 336 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=32 , a_=2 , a_=3 , a_=16 , a_=[32, 64, 1_28] , a_=[1, 2, 1] , a_=[2, 2, 4] , a_=2 , a_=2.0 , a_=True , a_=0.0 , a_=0.0 , a_=0.1 , a_="gelu" , a_=False , a_=True , a_=0.02 , a_=1E-5 , a_=True , a_=None , a_=True , a_=10 , a_=8 , a_=["stage1", "stage2"] , a_=[1, 2] , ):
'''simple docstring'''
__snake_case : Any = parent
__snake_case : Any = batch_size
__snake_case : str = image_size
__snake_case : int = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : Tuple = embed_dim
__snake_case : Tuple = hidden_sizes
__snake_case : Tuple = depths
__snake_case : str = num_heads
__snake_case : Optional[int] = window_size
__snake_case : Optional[Any] = mlp_ratio
__snake_case : Any = qkv_bias
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = drop_path_rate
__snake_case : List[str] = hidden_act
__snake_case : Dict = use_absolute_embeddings
__snake_case : Any = patch_norm
__snake_case : Tuple = layer_norm_eps
__snake_case : int = initializer_range
__snake_case : Tuple = is_training
__snake_case : Tuple = scope
__snake_case : List[Any] = use_labels
__snake_case : Dict = type_sequence_label_size
__snake_case : Any = encoder_stride
__snake_case : List[Any] = out_features
__snake_case : List[Any] = out_indices
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Optional[int] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : str = FocalNetModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(a_ )
__snake_case : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__snake_case : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = FocalNetBackbone(config=a_ )
model.to(a_ )
model.eval()
__snake_case : List[str] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__snake_case : Tuple = None
__snake_case : List[str] = FocalNetBackbone(config=a_ )
model.to(a_ )
model.eval()
__snake_case : str = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = FocalNetForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__snake_case : str = 1
__snake_case : List[Any] = FocalNetForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
__snake_case : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : int = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.type_sequence_label_size
__snake_case : Dict = FocalNetForImageClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[int] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : List[str] = 1
__snake_case : Tuple = FocalNetForImageClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Dict = config_and_inputs
__snake_case : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = FocalNetModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=a_ , embed_dim=37 , has_text_modality=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : List[Any] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : Union[str, Any] = model_class(a_ )
__snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[int] = [*signature.parameters.keys()]
__snake_case : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(a_ , a_ ) )
__snake_case : Union[str, Any] = outputs.hidden_states
__snake_case : Optional[Any] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a_ ) , a_ )
# FocalNet has a different seq_length
__snake_case : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__snake_case : List[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(a_ ) , a_ )
__snake_case , __snake_case , __snake_case , __snake_case : List[Any] = reshaped_hidden_states[0].shape
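            # flatten the spatial dims back onto the sequence axis so the reshaped states can be compared with hidden_states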
__snake_case : Any = (
reshaped_hidden_states[0].view(a_ , a_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__snake_case : str = True
self.check_hidden_states_output(a_ , a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
self.check_hidden_states_output(a_ , a_ , a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str = 3
__snake_case : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__snake_case : List[str] = True
self.check_hidden_states_output(a_ , a_ , a_ , (padded_height, padded_width) )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__snake_case : List[str] = True
self.check_hidden_states_output(a_ , a_ , a_ , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = FocalNetModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Union[str, Any] = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(config=a_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(a_ )
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__snake_case : Optional[int] = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case : Dict = model(**a_ )
# verify the logits
__snake_case : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
__snake_case : Optional[Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase__ =FocalNetConfig
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = FocalNetModelTester(self )
| 102 |
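# A standalone sketch of the patch-count arithmetic used by the shape checks
# above: one token per patch, so the expected seq_length is the product of the
# patch-grid dimensions. The 224/4 sizes here are illustrative, not taken from
# the tester's config.
image_size, patch_size = (224, 224), (4, 4)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
assert num_patches == 56 * 56 == 3136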
'''simple docstring'''
def UpperCamelCase_ ( separator : str , separated : list[str] ):
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("""join() accepts only strings to be joined""" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 120 | 0 |
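# A minimal sketch of the same join contract via the built-in str.join; the
# helper name is mine. Note that the strip-based version above removes
# *characters*, not a suffix, so a multi-character separator can over-strip:
# ("ab" + "aa").strip("aa") == "b", whereas "aa".join(["ab"]) == "ab".
def join_strings(separator: str, separated: list[str]) -> str:
    for item in separated:
        if not isinstance(item, str):
            raise Exception("join() accepts only strings to be joined")
    return separator.join(separated)
assert join_strings("-", ["a", "b", "c"]) == "a-b-c"
assert join_strings(" ", ["You", "are", "amazing!"]) == "You are amazing!"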
def solution ( limit : int = 1_00_00_00 ) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 277 |
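# A brute-force cross-check of the totient sieve above for small limits; the
# helper names are mine. The sieve keeps phi[i] == i - 1 exactly for primes,
# then applies the factor (1 - 1/i) to every multiple of each prime i.
from math import gcd
def totient_sum_naive(limit: int) -> int:
    return sum(
        sum(1 for k in range(1, n) if gcd(k, n) == 1) for n in range(2, limit + 1)
    )
def totient_sum_sieve(limit: int) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
assert totient_sum_naive(100) == totient_sum_sieve(100)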
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __a ( lowerCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_= [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_, UpperCAmelCase_= emb.weight.shape
UpperCAmelCase_= nn.Linear(lowerCAmelCase_ ,lowerCAmelCase_ ,bias=lowerCAmelCase_ )
UpperCAmelCase_= emb.weight.data
return lin_layer
def __a ( lowerCAmelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_= torch.load(lowerCAmelCase_ ,map_location="""cpu""" )
UpperCAmelCase_= Namespace(**checkpoint["""cfg"""]["""model"""] )
UpperCAmelCase_= checkpoint["""model"""]
remove_ignore_keys_(lowerCAmelCase_ )
UpperCAmelCase_= state_dict["""decoder.embed_tokens.weight"""].shape[0]
UpperCAmelCase_= {key.replace("""decoder""" ,"""model""" ): val for key, val in state_dict.items()}
UpperCAmelCase_= XGLMConfig(
vocab_size=lowerCAmelCase_ ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,)
UpperCAmelCase_= XGLMForCausalLM(lowerCAmelCase_ )
UpperCAmelCase_= model.load_state_dict(lowerCAmelCase_ ,strict=lowerCAmelCase_ )
print(lowerCAmelCase_ )
UpperCAmelCase_= make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__A = parser.parse_args()
__A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 277 | 1 |
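# The conversion above follows the usual fairseq-to-transformers recipe: drop
# optimizer bookkeeping keys, rewrite the "decoder." prefix to "model.", then
# load the renamed weights into a fresh model. A generic sketch of the prefix
# rewrite in isolation (helper name is mine, not the script's API):
import torch
def rename_prefix(state_dict: dict, old: str, new: str) -> dict:
    return {
        (new + k[len(old):] if k.startswith(old) else k): v
        for k, v in state_dict.items()
    }
sd = {"decoder.embed_tokens.weight": torch.zeros(4, 2)}
assert "model.embed_tokens.weight" in rename_prefix(sd, "decoder", "model")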
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
lowerCamelCase : int = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[int] = str(SCREAMING_SNAKE_CASE_ )
dataset_info.write_to_directory(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int = DatasetInfo.from_directory(SCREAMING_SNAKE_CASE_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , "dataset_info.json" ) )
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Tuple = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
lowerCamelCase : Optional[Any] = dataset_info._to_yaml_dict()
assert sorted(SCREAMING_SNAKE_CASE_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowerCamelCase : Tuple = yaml.safe_dump(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = yaml.safe_load(SCREAMING_SNAKE_CASE_ )
assert dataset_info_yaml_dict == reloaded
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : List[Any] = DatasetInfo()
lowerCamelCase : Tuple = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = str(SCREAMING_SNAKE_CASE_ )
dataset_infos_dict.write_to_directory(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE_ )
# the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCamelCase : List[Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCamelCase : Optional[int] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) )
| 283 |
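# The tests above are all write-then-reload round trips. A dependency-free
# sketch of the same pattern with plain JSON (DatasetInfo serializes richer
# metadata, but the shape of the check is the same):
import json
import os
import tempfile
def roundtrip(info: dict) -> dict:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "dataset_info.json")
        with open(path, "w") as f:
            json.dump(info, f)
        with open(path) as f:
            return json.load(f)
assert roundtrip({"dataset_size": 42}) == {"dataset_size": 42}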
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if "model" in orig_key:
lowerCamelCase : Dict = orig_key.replace("model." , "" )
if "norm1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
lowerCamelCase : Optional[Any] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
lowerCamelCase : int = orig_key.split("." )[0].split("_" )[-1]
lowerCamelCase : Dict = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowerCamelCase : List[str] = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
lowerCamelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
lowerCamelCase : int = "yoso." + orig_key
return orig_key
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCamelCase : Dict = val
lowerCamelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"]
lowerCamelCase : Dict = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model_state_dict"]
lowerCamelCase : List[str] = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ )
print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) )
model.eval()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 283 | 1 |
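# In the key-renaming function above, branch order matters: the specific
# patterns ("ff1", "ff2", "mlm_class") must be tested before their generic
# prefixes ("ff", "mlm"), or the generic rule would fire first. A compressed
# sketch of that specificity ordering (the rule list is illustrative):
def rename(key: str) -> str:
    rules = [
        ("ff1", "intermediate.dense"),
        ("ff2", "output.dense"),
        ("ff", "output.dense"),
    ]
    for old, new in rules:
        if old in key:
            return key.replace(old, new)
    return key
assert rename("encoder.ff1.weight") == "encoder.intermediate.dense.weight"
assert rename("encoder.ff.weight") == "encoder.output.dense.weight"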
"""simple docstring"""
from __future__ import annotations
import math
def is_prime ( number ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums ( n ):
    '''simple docstring'''
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate ( n ):
    '''simple docstring'''
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes ( count = 11 ):
    '''simple docstring'''
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution ( ):
    '''simple docstring'''
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(1_1)) = }')
| 248 |
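# The 6k +/- 1 shortcut in is_prime works because every integer is congruent
# to 0..5 mod 6, and beyond 3 only residues 1 and 5 can be prime. A quick
# agreement check against plain trial division (the helper name is mine):
def is_prime_naive(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, int(n**0.5) + 1))
assert all(is_prime(n) == is_prime_naive(n) for n in range(200))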
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCamelCase_ ( a_ ):
_A : Dict = 'unispeech'
def __init__( self , snake_case__=32 , snake_case__=7_68 , snake_case__=12 , snake_case__=12 , snake_case__=30_72 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=1_28 , snake_case__=16 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__=3_20 , snake_case__=2 , snake_case__=0.1 , snake_case__=1_00 , snake_case__=2_56 , snake_case__=2_56 , snake_case__=0.1 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=2_56 , snake_case__=80 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=0.5 , **snake_case__ , ) -> Dict:
"""simple docstring"""
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
UpperCAmelCase = hidden_size
UpperCAmelCase = feat_extract_norm
UpperCAmelCase = feat_extract_activation
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = conv_bias
UpperCAmelCase = num_conv_pos_embeddings
UpperCAmelCase = num_conv_pos_embedding_groups
UpperCAmelCase = len(self.conv_dim )
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = feat_proj_dropout
UpperCAmelCase = final_dropout
UpperCAmelCase = layerdrop
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = num_ctc_classes
UpperCAmelCase = vocab_size
UpperCAmelCase = do_stable_layer_norm
UpperCAmelCase = use_weighted_layer_sum
UpperCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase = num_codevectors_per_group
UpperCAmelCase = num_codevector_groups
UpperCAmelCase = contrastive_logits_temperature
UpperCAmelCase = feat_quantizer_dropout
UpperCAmelCase = num_negatives
UpperCAmelCase = codevector_dim
UpperCAmelCase = proj_codevector_dim
UpperCAmelCase = diversity_loss_weight
# ctc loss
UpperCAmelCase = ctc_loss_reduction
UpperCAmelCase = ctc_zero_infinity
# pretraining loss
UpperCAmelCase = replace_prob
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 248 | 1 |
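# The property at the end of the config multiplies the conv strides together,
# giving the overall downsampling factor of the feature extractor. For the
# default strides this is one output frame per 20 ms of 16 kHz audio:
import functools
import operator
conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the config above
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # 320 / 16000 Hz = 20 ms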
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_UpperCamelCase: Optional[Any] = get_tests_dir('fixtures')
_UpperCamelCase: List[str] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_UpperCamelCase: Union[str, Any] = get_tests_dir('fixtures/dummy-config.json')
class a__ ( unittest.TestCase ):
def lowercase ( self : Tuple ) -> str:
lowercase : Union[str, Any] = 0
def lowercase ( self : Any ) -> Any:
lowercase : Any = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Dict ) -> Optional[Any]:
lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : int ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Union[str, Any] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowercase : List[str] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase ).to_dict()
config_dict.pop('feature_extractor_type' )
lowercase : Optional[int] = WavaVecaFeatureExtractor(**lowerCAmelCase )
# save in new folder
model_config.save_pretrained(lowerCAmelCase )
config.save_pretrained(lowerCAmelCase )
lowercase : List[str] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : List[str] ) -> Optional[Any]:
lowercase : List[Any] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
with self.assertRaisesRegex(
lowerCAmelCase, 'bert-base is not a local folder and is not a valid model identifier' ):
lowercase : int = AutoFeatureExtractor.from_pretrained('bert-base' )
def lowercase ( self : Dict ) -> Dict:
with self.assertRaisesRegex(
lowerCAmelCase, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase, revision='aaaaaa' )
def lowercase ( self : Tuple ) -> Union[str, Any]:
with self.assertRaisesRegex(
lowerCAmelCase, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def lowercase ( self : str ) -> List[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase ):
lowercase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
lowercase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=lowerCAmelCase )
lowercase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCAmelCase )
lowercase : Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase, trust_remote_code=lowerCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
AutoConfig.register('custom', lowerCAmelCase )
AutoFeatureExtractor.register(lowerCAmelCase, lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoFeatureExtractor.register(lowerCAmelCase, lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase : Optional[Any] = CustomFeatureExtractor.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCAmelCase )
lowercase : int = AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowercase ( self : Tuple ) -> Dict:
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = True
try:
AutoConfig.register('custom', lowerCAmelCase )
AutoFeatureExtractor.register(lowerCAmelCase, lowerCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowercase : List[str] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowercase : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowerCAmelCase, 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 255 |
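# A toy registry illustrating the register-then-resolve contract the tests
# above exercise (a sketch of the pattern, not the transformers API):
REGISTRY: dict = {}
def register(name: str, cls: type) -> None:
    if name in REGISTRY:
        raise ValueError(f"'{name}' is already registered")
    REGISTRY[name] = cls
class CustomExtractor:
    pass
register("custom", CustomExtractor)
assert REGISTRY["custom"] is CustomExtractor
try:
    register("custom", CustomExtractor)  # re-registration must raise
except ValueError:
    pass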
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class a__ ( unittest.TestCase ):
def lowercase ( self : Optional[Any] ) -> Any:
lowercase : int = torch.nn.Linear(10, 10 )
lowercase : Optional[int] = torch.optim.SGD(model.parameters(), 0.1 )
lowercase : List[Any] = Accelerator()
lowercase : Optional[Any] = accelerator.prepare(lowerCAmelCase )
try:
pickle.loads(pickle.dumps(lowerCAmelCase ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 255 | 1 |
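# The test above checks that accelerate's optimizer wrapper stays picklable.
# The underlying property it builds on, in plain PyTorch (no accelerate):
import pickle
import torch
model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
restored = pickle.loads(pickle.dumps(optimizer))
assert isinstance(restored, torch.optim.SGD)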
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase = parser.parse_args()
if args.model_type == "bert":
UpperCamelCase = BertForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
UpperCamelCase = model.state_dict()
UpperCamelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCamelCase = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
UpperCamelCase = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
UpperCamelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
UpperCamelCase = state_dict["""cls.predictions.decoder.weight"""]
UpperCamelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase = state_dict[F'''cls.predictions.transform.dense.{w}''']
UpperCamelCase = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 65 |
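# The extraction above copies teacher layers 0, 2, 4, 7, 9, 11 into a
# six-layer student, renumbering them consecutively via std_idx. The index
# mapping in isolation:
teacher_layers = [0, 2, 4, 7, 9, 11]
mapping = {
    f"encoder.layer.{teacher}": f"encoder.layer.{student}"
    for student, teacher in enumerate(teacher_layers)
}
assert mapping["encoder.layer.7"] == "encoder.layer.3"
assert len(mapping) == 6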
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return EnvironmentCommand()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return EnvironmentCommand(args.accelerate_config_file )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
A_ : str = parser.add_parser('''env''' )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_SCREAMING_SNAKE_CASE , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : Optional[Any] = accelerate_config_file
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Tuple = '''not installed'''
if is_safetensors_available():
import safetensors
A_ : Any = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
A_ : Optional[Any] = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ : Union[str, Any] = '''not installed'''
A_ : List[Any] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
A_ : str = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ : List[Any] = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else F'''\t{accelerate_config}'''
)
A_ : Optional[int] = '''not installed'''
A_ : str = '''NA'''
if is_torch_available():
import torch
A_ : Tuple = torch.__version__
A_ : List[Any] = torch.cuda.is_available()
A_ : int = '''not installed'''
A_ : Any = '''NA'''
if is_tf_available():
import tensorflow as tf
A_ : str = tf.__version__
try:
# deprecated in v2.1
A_ : List[str] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ : Any = bool(tf.config.list_physical_devices('''GPU''' ) )
A_ : Union[str, Any] = '''not installed'''
A_ : Tuple = '''not installed'''
A_ : Tuple = '''not installed'''
A_ : Union[str, Any] = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
A_ : Tuple = flax.__version__
A_ : List[Any] = jax.__version__
A_ : List[Any] = jaxlib.__version__
A_ : Dict = jax.lib.xla_bridge.get_backend().platform
A_ : Union[str, Any] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_SCREAMING_SNAKE_CASE ) )
return info
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 65 | 1 |
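# The env command above probes each optional dependency and falls back to
# "not installed". The same probe, reduced to a helper (names are mine):
import importlib
import platform
def safe_version(module_name: str) -> str:
    try:
        return importlib.import_module(module_name).__version__
    except ImportError:
        return "not installed"
info = {
    "Platform": platform.platform(),
    "Python version": platform.python_version(),
    "numpy": safe_version("numpy"),
}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))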
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase , UpperCAmelCase=sys.maxsize ) -> str:
_lowercase ='''bilinear'''
_lowercase =max_size
_lowercase =short_edge_length
def __call__(self , UpperCAmelCase ) -> int:
_lowercase =[]
for img in imgs:
_lowercase , _lowercase =img.shape[:2]
# later: provide list and randomly choose index for resize
_lowercase =np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_lowercase =size * 1.0 / min(UpperCAmelCase , UpperCAmelCase )
if h < w:
_lowercase , _lowercase =size, scale * w
else:
_lowercase , _lowercase =scale * h, size
if max(UpperCAmelCase , UpperCAmelCase ) > self.max_size:
_lowercase =self.max_size * 1.0 / max(UpperCAmelCase , UpperCAmelCase )
_lowercase =newh * scale
_lowercase =neww * scale
_lowercase =int(neww + 0.5 )
_lowercase =int(newh + 0.5 )
if img.dtype == np.uinta:
_lowercase =Image.fromarray(UpperCAmelCase )
_lowercase =pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_lowercase =np.asarray(UpperCAmelCase )
else:
_lowercase =img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
_lowercase =nn.functional.interpolate(
UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=UpperCAmelCase ).squeeze(0 )
img_augs.append(UpperCAmelCase )
return img_augs
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase ) -> Tuple:
_lowercase =ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_lowercase =cfg.INPUT.FORMAT
_lowercase =cfg.SIZE_DIVISIBILITY
_lowercase =cfg.PAD_VALUE
_lowercase =cfg.INPUT.MAX_SIZE_TEST
_lowercase =cfg.MODEL.DEVICE
_lowercase =torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowercase =torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowercase =lambda UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std
def __A (self , UpperCAmelCase ) -> List[str]:
_lowercase =tuple(max(UpperCAmelCase ) for s in zip(*[img.shape for img in images] ) )
_lowercase =[im.shape[-2:] for im in images]
_lowercase =[
nn.functional.pad(
UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCAmelCase , UpperCAmelCase )
]
return torch.stack(UpperCAmelCase ), torch.tensor(UpperCAmelCase )
def __call__(self , UpperCAmelCase , UpperCAmelCase=False ) -> str:
with torch.no_grad():
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
_lowercase =[images]
if single_image:
assert len(UpperCAmelCase ) == 1
for i in range(len(UpperCAmelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCAmelCase , images.pop(UpperCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(UpperCAmelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_lowercase =torch.tensor([im.shape[:2] for im in images] )
_lowercase =self.aug(UpperCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_lowercase =[self.normalizer(UpperCAmelCase ) for x in images]
# now pad them to do the following operations
_lowercase , _lowercase =self.pad(UpperCAmelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_lowercase =torch.true_divide(UpperCAmelCase , UpperCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Tuple:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Optional[int]:
"""simple docstring"""
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
_lowercase , _lowercase =box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case )
| 5 |
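# ResizeShortestEdge above scales the image so its short side hits the target
# size, then rescales if the long side would exceed max_size, rounding with
# int(x + 0.5). The size arithmetic in isolation, for a fixed target size
# (the helper name is mine):
def target_hw(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)
assert target_hw(480, 640, 800, 1333) == (800, 1067)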
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase( unittest.TestCase ):
lowercase_ : Dict = JukeboxTokenizer
lowercase_ : Dict = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
import torch
_lowercase : str = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
_lowercase : Optional[Any] = tokenizer(**self.metas)['input_ids']
# fmt: off
_lowercase : Optional[int] = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
def UpperCamelCase ( self) -> int:
"""simple docstring"""
import torch
_lowercase : List[str] = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
_lowercase : List[str] = tokenizer(**self.metas)['input_ids']
# fmt: off
_lowercase : Optional[int] = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T] ):
    def __init__(self , arr : list[T] , fnc : Callable[[T, T], T] ):
        '''simple docstring'''
        any_type : Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build(self ):
        '''simple docstring'''
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update(self , p : int , v : T ):
        '''simple docstring'''
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query(self , l : int , r : int ):  # noqa: E741
        '''simple docstring'''
        l, r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 307 |
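# A single update-then-query round on the SegmentTree above, condensing what
# the exhaustive test loop verifies:
st = SegmentTree([1, 10, -2, 9], min)
assert st.query(0, 3) == -2
st.update(2, 7)  # leaf 2: -2 -> 7; ancestors are recombined on the way up
assert st.query(0, 3) == 1
assert st.query(2, 3) == 7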
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Dict = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , encoding='utf_8' ) as f:
lowerCAmelCase__ : Dict = csv.reader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Dict = []
for dataset in encoded_datasets:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ : Optional[Any] = with_conta
lowerCAmelCase__ : List[str] = with_conta
lowerCAmelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = with_conta
lowerCAmelCase__ : Optional[int] = with_conta
lowerCAmelCase__ : Optional[int] = mc_label
lowerCAmelCase__ : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading function also adds new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
lowerCAmelCase__ : Tuple = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase__ : Dict = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
train_data = TensorDataset(*train_tensor_dataset )
train_sampler = RandomSampler(train_data )
train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
eval_data = TensorDataset(*eval_tensor_dataset )
eval_sampler = SequentialSampler(eval_data )
eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
    param_optimizer = list(model.named_parameters() )
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {
            'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
            'weight_decay': args.weight_decay,
        },
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
    scheduler = get_linear_schedule_with_warmup(
        optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
if args.do_train:
    nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
    model.train()
    for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
        tr_loss = 0
        nb_tr_steps = 0
        tqdm_bar = tqdm(train_dataloader , desc='Training' )
        for step, batch in enumerate(tqdm_bar ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            loss = args.lm_coef * losses[0] + losses[1]
            loss.backward()
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            tr_loss += loss.item()
            exp_average_loss = (
                loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
            )
            nb_tr_steps += 1
            tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
    model_to_save = model.module if hasattr(model , 'module' ) else model # Only save the model itself
    # If we save using the predefined names, we can load using `from_pretrained`
    # (WEIGHTS_NAME / CONFIG_NAME are the standard transformers filename constants, assumed imported earlier)
    output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
    output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
    torch.save(model_to_save.state_dict() , output_model_file )
    model_to_save.config.to_json_file(output_config_file )
    tokenizer.save_vocabulary(args.output_dir )
    # Load a trained model and vocabulary that you have fine-tuned
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
    model.to(device )
if args.do_eval:
model.eval()
    eval_loss , eval_accuracy = 0, 0
    nb_eval_steps , nb_eval_examples = 0, 0
    for batch in tqdm(eval_dataloader , desc='Evaluating' ):
        batch = tuple(t.to(device ) for t in batch )
        input_ids , mc_token_ids , lm_labels , mc_labels = batch
        with torch.no_grad():
            _ , mc_loss , _ , mc_logits = model(
                input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
        mc_logits = mc_logits.detach().cpu().numpy()
        mc_labels = mc_labels.to('cpu' ).numpy()
        tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
        eval_loss += mc_loss.mean().item()
        eval_accuracy += tmp_eval_accuracy
        nb_eval_examples += input_ids.size(0 )
        nb_eval_steps += 1
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    train_loss = tr_loss / nb_tr_steps if args.do_train else None
    result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
    output_eval_file = os.path.join(args.output_dir , 'eval_results.txt' )
    with open(output_eval_file , 'w' ) as writer:
        logger.info('***** Eval results *****' )
        for key in sorted(result.keys() ):
            logger.info(' %s = %s' , key , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main() | 307 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =[
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(a_ ) )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : int =[
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(a_ ) )
def __magic_name__ ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Tuple =[
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(a_ ) )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(a_ ) )
def __magic_name__ ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ : Any =[
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(a_ ) )
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : int =[
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : Tuple ="""fp16"""
self.assertTrue(is_safetensors_compatible(a_ , variant=a_ ) )
def __magic_name__ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : List[Any] ="""fp16"""
self.assertTrue(is_safetensors_compatible(a_ , variant=a_ ) )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
# pass variant but use the non-variant filenames
SCREAMING_SNAKE_CASE__ : List[str] =[
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ : Optional[Any] ="""fp16"""
self.assertTrue(is_safetensors_compatible(a_ , variant=a_ ) )
def __magic_name__ ( self : str ) -> str:
SCREAMING_SNAKE_CASE__ : int =[
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ : str ="""fp16"""
self.assertFalse(is_safetensors_compatible(a_ , variant=a_ ) )
def __magic_name__ ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : str =[
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : Dict ="""fp16"""
self.assertTrue(is_safetensors_compatible(a_ , variant=a_ ) )
def __magic_name__ ( self : int ) -> List[Any]:
# pass variant but use the non-variant filenames
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ : Optional[int] ="""fp16"""
self.assertTrue(is_safetensors_compatible(a_ , variant=a_ ) )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =[
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : int ="""fp16"""
self.assertFalse(is_safetensors_compatible(a_ , variant=a_ ) ) | 152 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def setUp ( self ) -> Optional[Any]:
        self.checkpoint = """laion/clap-htsat-unfused"""
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer ( self ,**a_ ) -> str:
return RobertaTokenizer.from_pretrained(self.checkpoint ,**a_ )
    def get_feature_extractor ( self ,**a_ ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint ,**a_ )
    def tearDown ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Optional[int]:
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,a_ )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,a_ )
def _snake_case ( self ) -> List[Any]:
        processor = ClapProcessor(tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False ,padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=False ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,a_ )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,a_ )
def _snake_case ( self ) -> str:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1_000) )
        input_feat_extract = feature_extractor(raw_speech ,return_tensors="""np""" )
        input_processor = processor(audios=raw_speech ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _snake_case ( self ) -> Tuple:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        input_str = """This is a test string"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _snake_case ( self ) -> List[Any]:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok ,decoded_processor )
def _snake_case ( self ) -> Dict:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer ,feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
| 215 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase__ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 364 |
def stooge_sort( arr ) -> list:
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge( arr , i , h ) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i] , arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3 )
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , (h) )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , (h - t) )
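# A short worked trace (added for illustration, not part of the original record):
# for arr = [4, 1, 3, 2] the initial call has i = 0, h = 3, so t = 1 and the
# three recursive calls are stooge(arr, 0, 2), stooge(arr, 1, 3), stooge(arr, 0, 2);
# each sub-range covers 2/3 of the elements and overlaps the other, which is
# what lets the recursion slowly converge to a sorted array.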
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted)) | 307 | 0 |
import os
from datetime import datetime as dt
from github import Github
_snake_case : int = [
'good first issue',
'feature request',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i : i.created_at, reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 284 |
def is_power_of_two( number : int ) -> bool:
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
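# A worked example of the bit trick (added for illustration; `is_power_of_two`
# is the name restored above): a power of two has exactly one set bit, so
# 8 & 7 == 0b1000 & 0b0111 == 0 and is_power_of_two(8) is True, while
# 6 & 5 == 0b0110 & 0b0101 == 0b0100 != 0 and is_power_of_two(6) is False.
# Note that 0 & -1 == 0, so this check also reports 0 as a power of two.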
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 1 |
def valid_coloring( neighbours : list[int] , colored_vertices : list[int] , color : int ) -> bool:
    """
    Check that no already-colored neighbour of the current vertex uses `color`.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
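# A hedged usage sketch (illustrative only): for a vertex whose adjacency row is
# [0, 1, 1] and current colors [0, 2, 1], color 1 is blocked by the neighbour at
# index 2, while color 0 is free because vertex 0 is not adjacent:
# valid_coloring([0, 1, 1] , [0, 2, 1] , 1 )  # -> False
# valid_coloring([0, 1, 1] , [0, 2, 1] , 0 )  # -> True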
def util_color( graph : list[list[int]] , max_colors : int , colored_vertices : list[int] , index : int ) -> bool:
    """
    Recursively try to assign one of `max_colors` colors to every vertex.
    """
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph : list[list[int]] , max_colors : int ) -> list[int]:
    """
    Return a coloring of `graph` using at most `max_colors` colors, or an
    empty list if no such coloring exists.
    """
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
 | 360 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__lowerCamelCase : List[Any] = get_tests_dir('''fixtures''')
__lowerCamelCase : Optional[int] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__lowerCamelCase : Any = get_tests_dir('''fixtures/dummy-config.json''')
class __snake_case ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 0
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_lowercase , _lowercase )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def __a ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase ).to_dict()
config_dict.pop("""feature_extractor_type""" )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor(**_lowercase )
# save in new folder
model_config.save_pretrained(_lowercase )
config.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_lowercase , _lowercase )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def __a ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase , revision="""aaaaaa""" )
def __a ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowercase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self : str ):
"""simple docstring"""
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowercase )
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , _lowercase )
AutoFeatureExtractor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoFeatureExtractor.register(_lowercase , _lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self : Any ):
"""simple docstring"""
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = True
try:
AutoConfig.register("""custom""" , _lowercase )
AutoFeatureExtractor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(_lowercase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 204 | 0 |
def solution( max_perimeter = 10**9 ):
    '''
    Sum the perimeters of all almost-equilateral Heronian triangles (integer
    sides a, a, a +/- 1 with integral area) whose perimeter is at most
    `max_perimeter`.
    '''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__magic_name__ = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
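# A hedged check (added for illustration): the recurrence generates perimeters
# 16, 50, ... since (5, 5, 6) has height 4 and integer area 12 and
# (17, 17, 16) has height 15 and area 120, so solution(100) == 16 + 50 == 66.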
if __name__ == "__main__":
print(F'''{solution() = }''')
| 88 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config (PretrainedConfig ):
    model_type = '''t5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=32_128 , d_model=512 , d_kv=64 , d_ff=2_048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> int:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
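# A hedged usage sketch (illustrative; `T5Config` is the class name restored
# above): 'gated-gelu' splits on '-' into ('gated', 'gelu'), so `is_gated_act`
# becomes True and the backwards-compatibility branch remaps `dense_act_fn`:
# config = T5Config(feed_forward_proj='gated-gelu' )
# assert config.dense_act_fn == 'gelu_new' and config.is_gated_act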
class T5OnnxConfig (OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        return common_inputs
    @property
    def default_onnx_opset ( self ) -> int:
        return 13
| 268 | 0 |
from numpy import exp, pi, sqrt
def gaussian( x : float , mu : float = 0.0 , sigma : float = 1.0 ):
    """
    Probability density of the normal distribution N(mu, sigma^2) at x.
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
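# A quick sanity check (added for illustration; `gaussian` is an assumed name
# for the restored function): at the mean with sigma = 1 the density is
# 1 / sqrt(2 * pi) ~= 0.3989, i.e. round(gaussian(0), 4) == 0.3989.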
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ) -> Optional[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs ( self ) -> Any:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model ( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head ( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa ( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa ( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif ( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif ( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice ( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ) -> str:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
    def setUp ( self ) -> Tuple:
        '''simple docstring'''
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
    def test_config ( self ) -> str:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_xlm_model ( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )
    def test_xlm_lm_head ( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
    def test_xlm_simple_qa ( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
    def test_xlm_qa ( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
    def test_xlm_sequence_classif ( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
    def test_xlm_token_classif ( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
    def test_xlm_for_multiple_choice ( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate ( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =min_length + idx + 1
__a =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
    def _check_hidden_states_for_generate ( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
    def test_model_from_pretrained ( self ) -> Union[str, Any]:
        '''simple docstring'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
    def test_lm_generate_xlm_mlm_en_2048 ( self ) -> Tuple:
        '''simple docstring'''
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device ) # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 308 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCAmelCase ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def hide_cursor() -> Dict:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def show_cursor() -> Optional[int]:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def hide_cursor_context ( ) -> Optional[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
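# A hedged usage sketch (`hide_cursor_context` is an assumed name; the original
# identifier is not recoverable from this dump):
# with hide_cursor_context():
#     run_menu()  # hypothetical callable; the terminal cursor is hidden here
# # the cursor is restored even if the body raises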
| 336 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default : Dict=None , metadata : Union[str, Any]=None ) -> Dict:
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments :
"""simple docstring"""
lowerCamelCase = field(
metadata={'help': 'The csv file to plot.'} , )
lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Disable logarithmic scale when plotting'} , )
lowerCamelCase = field(
default=_UpperCAmelCase , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
lowerCamelCase = list_field(
default=_UpperCAmelCase , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int( SCREAMING_SNAKE_CASE__ : Dict ) -> str:
'''simple docstring'''
try:
int(SCREAMING_SNAKE_CASE__ )
return True
except ValueError:
return False
def can_convert_to_float( SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
try:
float(SCREAMING_SNAKE_CASE__ )
return True
except ValueError:
return False
class Plot :
    """simple docstring"""
    def __init__( self , args ) -> str:
        '''simple docstring'''
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]['result'][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = int(row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]['result'][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = float(row['result'] )
    def plot ( self ) -> Union[str, Any]:
'''simple docstring'''
        fig , ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']
            (x_axis_array , inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label , inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array = np.asarray(x_axis_array , np.int32 )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
                plt.plot(x_axis_array , y_axis_array , '--' )
title_str += F' {label_model_name} vs.'
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
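# A hedged sketch of the CSV layout this class consumes (column names inferred
# from the row lookups in __init__ above; the values are made up):
#
# model,batch_size,sequence_length,result
# bert-base-cased,8,128,0.034
# bert-base-cased,8,512,0.121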
def main() -> int:
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
plot.plot()
if __name__ == "__main__":
main()
| 282 |
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation : list ) -> int:
    '''
    Evaluate a postfix (reverse Polish notation) expression given as a list of
    tokens, with division truncating toward zero.
    '''
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
for token in postfix_notation:
if token in operations:
            b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
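# Hedged worked examples (illustrative; `evaluate_postfix` is an assumed name
# for the restored function):
# evaluate_postfix(['2', '1', '+', '3', '*'] )   # (2 + 1) * 3 -> 9
# evaluate_postfix(['4', '13', '5', '/', '+'] )  # 4 + 13 // 5 -> 6
# evaluate_postfix(['-7', '2', '/'] )            # truncates toward zero -> -3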
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 | 1 |
def pancake_sort (arr ):
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first `cur` elements so the maximum moves to the end
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
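# A short worked trace (added for illustration): pancake_sort([3, 1, 2]) finds
# the max 3 at index 0, so the first flip is a no-op, then flips the first 3
# items to get [2, 1, 3]; the next pass flips the first 2 items to reach
# [1, 2, 3].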
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 277 |
from collections import deque
from .hash_table import HashTable
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) ->Optional[int]:
        super().__init__(*args , **kwargs )
    def _set_value ( self , key , data ) ->Dict:
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor ( self ) ->Optional[Any]:
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution ( self , key , data=None ) ->Optional[Any]:
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
| 277 | 1 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number( phone ) -> bool:
    """
    Determine whether the given string is a valid Sri Lankan mobile number.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern , phone ) )
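# Hedged examples (added for illustration): the pattern accepts 0 / 94 / +94 /
# 0094 prefixes followed by 7 and a valid second digit:
# is_sri_lankan_phone_number('0712345678' )    # -> True
# is_sri_lankan_phone_number('+94773283048' )  # -> True
# is_sri_lankan_phone_number('0912345678' )    # -> False (no 7 after the prefix)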
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 351 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ) -> None:
    """
    Print the first- and second-order Shannon entropy of `text`.
    """
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text( text ) -> tuple[dict, dict]:
    """
    Count single-character and two-character sequences in `text`.
    """
    single_char_strings = Counter() # type: ignore
    two_char_strings = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(a_ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
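# A tiny worked example (added for illustration): analyze_text('ab') returns
# single_char_strings == {'b': 1, 'a': 1} (the last character is counted first,
# then the loop) and two_char_strings == {' a': 1, 'ab': 1} (a space is
# prepended before the first character).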
def main() -> Optional[Any]:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
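
# A tiny worked example of the first-order formula H = -sum(p * log2(p)): for the
# text "aab", analyze_text counts {"a": 2, "b": 1}, so the probabilities are 2/3
# and 1/3 and H = -(2/3 * log2(2/3) + 1/3 * log2(1/3)) ~= 0.918 bits, which
# calculate_prob prints rounded as "1.0".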
| 124 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 248 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"

class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    # Used to change the data of a particular node
    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev

def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))

def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )

def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 248 | 1 |
"""Tokenization classes for CPMAnt."""
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab

class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedily take the longest substring starting at `start` that is in the vocab.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
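
# Longest-match-first in action (hypothetical toy vocab; any container supporting
# the ``in`` operator works here):
# >>> WordpieceTokenizer({"un", "happy", "unhappy"}).tokenize("unhappy")
# ['unhappy']
# >>> WordpieceTokenizer({"un", "happy"}).tokenize("unhappy")
# ['un', 'happy']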

class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine each chunk with wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))

        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 352 |
"""Convert Swin checkpoints from the timm library."""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config

def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name

def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
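
# The timm checkpoint stores attention as a single fused qkv projection of shape
# (3 * dim, dim); the loop above slices it into equal thirds:
# query = val[:dim, :], key = val[dim : dim * 2, :], value = val[-dim:, :].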

def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 311 | 0 |
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
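
# Illustrative checks; the binary-search variant exists because floating-point
# sqrt can misjudge very large integers:
# >>> perfect_square(9)
# True
# >>> perfect_square_binary_search(10**18)  # (10**9) ** 2
# True
# >>> perfect_square_binary_search(10**18 + 1)
# False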
| 65 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)

def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case

def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate

class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
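
# Typical usage of the `offline` helper above in a test (a sketch; `load_dataset`
# stands in for any code that performs HTTP requests):
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     load_dataset("squad")  # raises requests.ConnectionError immediately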

@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)

class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda x: tee(x, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda x: tee(x, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
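
# Example invocation of the async subprocess runner above (a sketch; stdout is
# collected as a list of decoded lines):
# result = execute_subprocess_async(["python", "-c", "print('hello')"])
# assert result.stdout[0] == "hello"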
| 65 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 362 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308 | 0 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
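
# Minimal usage sketch (assumes a PIL image as input; `__call__` on
# `BaseImageProcessor` forwards to `preprocess`):
# >>> processor = MobileNetV2ImageProcessor()
# >>> batch = processor(images=pil_image, return_tensors="pt")
# >>> batch["pixel_values"].shape
# torch.Size([1, 3, 224, 224])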
| 307 |
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor while the block runs."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
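
# Typical usage (hides the terminal cursor for the duration of the block;
# `render_menu()` is a hypothetical placeholder):
# with hide():
#     render_menu()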
| 307 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
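
# Minimal usage sketch (downloads the pretrained tokenizer from the Hub):
# >>> tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
# >>> tokenizer("J'aime le camembert !")["input_ids"]  # wrapped in <s> ... </s>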
| 102 |
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below the given ratio."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
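
# Worked step for the inner loop above: after the 3x3 layer (corners 3, 5, 7, 9),
# the next layer's corners come from range(3*3 + 3 + 1, 5*5, 3 + 1) = 13, 17, 21;
# the odd square 25 is skipped, since squares past 1 are never prime. For the
# default ratio of 0.1 the loop stops at a side length of 26241 (Project Euler 58).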
| 102 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
__UpperCAmelCase = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
__UpperCAmelCase = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
__UpperCAmelCase = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
__UpperCAmelCase = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk
        nltk.download("""wordnet""")
        if NLTK_VERSION >= version.Version("""3.6.5"""):
            nltk.download("""punkt""")
        if NLTK_VERSION >= version.Version("""3.6.6"""):
            nltk.download("""omw-1.4""")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("""3.6.5"""):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 84 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __SCREAMING_SNAKE_CASE:
def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]:
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_mask
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = scope
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = None
if self.use_input_mask:
snake_case__ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ = None
if self.use_token_type_ids:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict:
snake_case__ = LlamaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str:
snake_case__ = True
snake_case__ = LlamaModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , )
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any:
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]:
snake_case__ = True
snake_case__ = True
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# first forward pass
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , )
snake_case__ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
# select random slice
snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
def lowerCAmelCase_ ( self: int ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ):
_UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = LlamaModelTester(self )
snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
snake_case__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ = type
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'single_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: Dict ) -> int:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'multi_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCAmelCase_ ( self: Dict ) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ids_tensor([1, 10] , config.vocab_size )
snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = LlamaModel(UpperCamelCase )
original_model.to(UpperCamelCase )
original_model.eval()
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = {'type': scaling_type, 'factor': 10.0}
snake_case__ = LlamaModel(UpperCamelCase )
scaled_model.to(UpperCamelCase )
scaled_model.eval()
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: int ) -> List[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
snake_case__ = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
snake_case__ = 'Simply put, the theory of relativity states that '
snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' )
snake_case__ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase )
# greedy generation outputs
snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase )
snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
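# Standalone illustration of the rope-scaling configs the parameterized test
# above cycles through; the dict mirrors the `{'type': ..., 'factor': 10.0}`
# literal in the test body, and `rope_scaling` is the LlamaConfig field it feeds.
def make_scaled_config(scaling_type: str) -> "LlamaConfig":
    return LlamaConfig(
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        rope_scaling={"type": scaling_type, "factor": 10.0},
    )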
| 307 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__A : List[Any] = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    '''Extract warnings from a downloaded artifact (in .zip format)'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("""UTF-8""")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = """\n""".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(F''': {x}: ''' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    '''Extract warnings from all artifact files'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(""".zip""") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
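# Direct use of the helpers above, bypassing the CLI below (a sketch;
# "artifacts_dir" stands in for a directory of downloaded *.zip artifacts,
# and the `from_gh` global must be set, as it is in the __main__ block):
#   targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
#   for warning in sorted(extract_warnings("artifacts_dir", targets)):
#       print(warning)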
if __name__ == "__main__":
    def list_str(values):
        '''Parse a comma-separated CLI value into a list.'''
        return values.split(""",""")
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
__A : Union[str, Any] = parser.parse_args()
__A : Tuple = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__A : Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__A : int = extract_warnings(args.output_dir, args.targets)
__A : Dict = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 8 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowercase : Dict = StableDiffusionInpaintPipeline
lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase : Optional[int] = frozenset([] )
def a__ ( self :Any ):
torch.manual_seed(0 )
snake_case_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,)
snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
snake_case_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,)
snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase )
snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_UpperCamelCase ).startswith("""mps""" ):
snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self :Any ):
snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Optional[Any] = self.get_dummy_components()
snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase )
snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self :Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
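# End-user sketch of the pipeline exercised by the slow tests below (model id
# and fp16 dtype mirror those tests; needs a GPU and the downloaded weights):
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe(prompt, image=init_image, mask_image=mask_image).images[0]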
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self :Tuple ):
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a__ ( self :Tuple ):
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,)
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
snake_case_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a__ ( self :Union[str, Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" )
snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,)
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[int] = torch.manual_seed(0 )
snake_case_ : Tuple = pipe(
prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,)
snake_case_ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
| 8 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''', F"""ffn.experts.expert_{expert_idx}""")
            else:
                key = key.replace('''moe_layer.experts.''', '''ffn.experts.expert_''')
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''', '''.ffn.router.classifier''')
        # `in key` was missing from the next two checks, which made them always true
        if "fc2" in key and "experts" not in key:
            key = key.replace('''.fc2.''', '''.ffn.fc2.''')
        if "fc1" in key and "experts" not in key:
            key = key.replace('''.fc1.''', '''.ffn.fc1.''')
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''', '''.cross_attention.''')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''', '''cross_attention_layer_norm''')
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''', '''ff_layer_norm''')
        new_dict[key] = state_dict[old_key]
    return new_dict
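# Worked example of the key mapping above on a made-up fairseq key:
#   >>> rename_fairseq_keys({"layers.0.moe_layer.experts.0.fc1.weight": 0}, expert_idx=3)
#   {'layers.0.ffn.experts.expert_3.fc1.weight': 0}
# (the expert branch rewrites the prefix, and the fc1/fc2 rules then skip
# keys that already contain "experts")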
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + F"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['''model''']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('''.bin''', F"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('''.bin''', F"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''')['''model''']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # expose the decoder token embedding under the model-level key (assumed '''shared.weight''', per the upstream conversion script)
    shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('''.bin''', F"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")
        temp_filename = os.path.join(dump_path, weights_name.replace('''.bin''', F"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), '''w''', encoding='''utf-8''') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '''\n'''
        f.write(content)
    return metadata, index
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A : Optional[Any] = parser.parse_args()
A , A : List[str] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
A : Dict = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
A : Optional[Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
    model.save_pretrained(args.pytorch_dump_folder_path)
| 6 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : bool = True , **A_ : Dict , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ = do_convert_rgb
def a__ ( self : Dict , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Tuple , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Dict , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : str , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Union[str, Any] , ) -> str:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : ImageInput , A_ : bool = None , A_ : Dict[str, int] = None , A_ : PILImageResampling = None , A_ : bool = None , A_ : int = None , A_ : bool = None , A_ : float = None , A_ : bool = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : bool = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **A_ : Dict , ) -> PIL.Image.Image:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , param_name='size' , default_to_square=A_ )
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ )
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ = [convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
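# A minimal usage sketch for the processor defined above; the class name `A`
# comes from this listing (upstream this is a CLIP-style image processor that
# would normally be loaded with `from_pretrained`).
if __name__ == "__main__":
    from PIL import Image
    processor = A()
    img = Image.new("RGB", (256, 256), color=(128, 64, 32))
    batch = processor(images=img, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)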
| 204 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Dict = '''dpr'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Any=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Any=1_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2 , SCREAMING_SNAKE_CASE__ : int=3_0_7_2 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Dict=1E-12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE__ : int = 0 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = vocab_size
a_ : List[str] = hidden_size
a_ : int = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : Optional[int] = hidden_act
a_ : Dict = intermediate_size
a_ : Union[str, Any] = hidden_dropout_prob
a_ : Dict = attention_probs_dropout_prob
a_ : str = max_position_embeddings
a_ : Any = type_vocab_size
a_ : Optional[Any] = initializer_range
a_ : str = layer_norm_eps
a_ : Dict = projection_dim
a_ : List[str] = position_embedding_type
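# Quick sketch against the upstream class this listing corresponds to
# (`DPRConfig`); `projection_dim=0` in the signature above means the extra
# encoder projection layer is disabled by default.
if __name__ == "__main__":
    from transformers import DPRConfig
    config = DPRConfig(projection_dim=128)
    print(config.hidden_size, config.projection_dim)  # 768 128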
| 120 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the expanded key (spaces pass through)."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted
def original_text(cipher: str, key_new: str) -> str:
    """Decrypt by adding the key values back."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(F"""Encrypted Text = {s}""")
    print(F"""Original Text = {original_text(s, key_new)}""")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
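# Hand-computed check of the first word, following the functions above:
#   generate_key("THE GERMAN ATTACK", "SECRET") -> "SECRETSECRETSECRE"
#   'T' - 'S' = 19 - 18 = 1 -> 'B'
#   'H' - 'E' =  7 -  4 = 3 -> 'D'
#   'E' - 'C' =  4 -  2 = 2 -> 'C'
# so the ciphertext begins "BDC ".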
| 120 | 1 |
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
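# Round-trip sanity check for the helpers defined below:
#   encrypt("Sos")         -> "... --- ..."
#   decrypt("... --- ...") -> "SOS"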
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
    main()
| 86 |
import os
def solution(filename: str = "input.txt") -> int:
    '''Project Euler 82: minimal path sum moving right, up, or down.'''
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(''',''')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # moving right: carry the sum over from the previous column
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # moving down: relax each cell against the one above it
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # moving up: relax each cell against the one below it
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
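# Worked 3x3 example for the dynamic programme above: starting anywhere in the
# left column and moving only right, up, or down, the cheapest path through
#   [[1, 2, 3],
#    [9, 9, 9],
#    [9, 9, 9]]
# is 1 -> 2 -> 3 along the top row, so solution would return 6 for this grid.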
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 308 | 0 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Any = {'''vocab_file''': '''spiece.model'''}
A__ : str = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
A__ : Tuple = {
'''AI-Sweden/gpt-sw3-126m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-350m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-1.6b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-6.7b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-20b''': 2_0_4_8,
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , __a : int , __a : List[str]=False , __a : int=False , __a : Union[str, Any]=False , __a : Union[str, Any]=None , __a : str=None , __a : Tuple=None , __a : Dict=None , __a : Optional[Dict[str, Any]] = None , **__a : Union[str, Any] , ) -> None:
'''simple docstring'''
__snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs
__snake_case : Dict = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' if you are testing the model, this can safely be ignored' )
__snake_case : Tuple = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__snake_case : str = '<|endoftext|>' if eos_token is None else eos_token
__snake_case : Any = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__snake_case : Tuple = unk_token if pad_token is None else pad_token
__snake_case : Union[str, Any] = eos_token if bos_token is None else bos_token
else:
__snake_case : List[str] = '<pad>' if pad_token is None else pad_token
__snake_case : List[Any] = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__snake_case : Tuple = do_lower_case
__snake_case : Union[str, Any] = remove_space
__snake_case : List[Any] = keep_accents
__snake_case : Optional[Any] = vocab_file
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
# Used for whitespace normalization in input texts
        # fmt: off
__snake_case : Union[str, Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__snake_case : List[Any] = re.compile(
f'''[{"".join(map(__a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[Any] = self.__dict__.copy()
__snake_case : List[Any] = None
return state
def __setstate__( self : Dict , __a : List[Any] ) -> Any:
'''simple docstring'''
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : str = {}
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
'''simple docstring'''
return len(self.sp_model )
    def preprocess_text(self, text: str) -> str:
        '''simple docstring'''
        text = self.non_printing_characters_re.sub('', text)
        # Normalize whitespaces
        text = ''.join([char if char not in self.whitespaces else ' ' for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize('NFC', text)
        return text
def A_ ( self : Dict , __a : str , **__a : Dict ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[int] = self.preprocess_text(__a )
return self.sp_model.encode(__a , out_type=__a )
def A_ ( self : List[str] , __a : str ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(__a )
def A_ ( self : List[str] , __a : int ) -> str:
'''simple docstring'''
return self.sp_model.IdToPiece(__a )
@staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        '''simple docstring'''
        return out_string
def A_ ( self : str , __a : List[str] ) -> str:
'''simple docstring'''
__snake_case : List[str] = []
__snake_case : Optional[Any] = ''
__snake_case : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
__snake_case : Tuple = True
__snake_case : List[str] = []
else:
current_sub_tokens.append(__a )
__snake_case : Optional[int] = False
out_string += self.sp_model.decode(__a )
return out_string
def A_ ( self : List[Any] ) -> Dict[str, int]:
'''simple docstring'''
__snake_case : Optional[int] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : str , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , 'wb' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def A_ ( self : str , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(text , str ):
text = self.preprocess_text(text )
token_ids = self.sp_model.encode(text )
else:
text = [self.preprocess_text(t ) for t in text]
token_ids = self.sp_model.encode(text )
if return_tensors is True or return_tensors == "pt":
token_ids = torch.tensor(token_ids )
return token_ids
def A_ ( self : List[Any] , __a : Union[int, List[int]] ) -> str:
'''simple docstring'''
return self.sp_model.decode(__a )
def A_ ( self : Optional[int] , conversation : "Conversation" ) -> List[int]:
'''simple docstring'''
all_responses = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
prompt = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(all_responses ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=prompt )
| 0 |
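The tokenizer above normalizes text in three passes before SentencePiece sees it: strip non-printing control characters, collapse unicode whitespace variants to a plain space, then NFC-normalize. Below is a minimal self-contained sketch of that pipeline; the WHITESPACES set is illustrative, not the exact set used above.

```python
import re
import unicodedata

# Control/format characters stripped before tokenization (same ranges as the regex above).
NON_PRINTING_RE = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)
# Illustrative subset of unicode whitespace variants collapsed to a plain space.
WHITESPACES = {"\u2009", "\u200a", "\u202f", "\u2005", "\u3000"}

def preprocess_text(text: str) -> str:
    text = NON_PRINTING_RE.sub("", text)
    text = "".join(" " if ch in WHITESPACES else ch for ch in text)
    return unicodedata.normalize("NFC", text)

print(preprocess_text("hello\u2009world\u00ad"))  # -> 'hello world' (thin space unified, soft hyphen dropped)
```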
'''simple docstring'''
from __future__ import annotations
A__ : str = '''Muhammad Umer Farooq'''
A__ : int = '''MIT'''
A__ : Optional[int] = '''1.0.0'''
A__ : List[Any] = '''Muhammad Umer Farooq'''
A__ : Optional[Any] = '''contact@muhammadumerfarooq.me'''
A__ : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser ( HTMLParser ):
def __init__( self : Union[str, Any] , domain : str ) -> None:
'''simple docstring'''
super().__init__()
self.urls : list[str] = []
self.domain = domain
def handle_starttag( self : Dict , tag : str , attrs : list[tuple[str, str | None]] ) -> None:
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined and is neither empty nor just a fragment ('#'), process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
url = parse.urljoin(self.domain , value )
self.urls.append(url )
def get_domain_name( _UpperCAmelCase : str ) -> str:
return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] )
def get_sub_domain_name( _UpperCAmelCase : str ) -> str:
return parse.urlparse(_UpperCAmelCase ).netloc
def emails_from_url( _UpperCAmelCase : str = "https://github.com" ) -> list[str]:
domain = get_domain_name(_UpperCAmelCase )
# Initialize the parser
parser = Parser(_UpperCAmelCase )
try:
# Open URL
r = requests.get(_UpperCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
valid_emails = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
read = requests.get(link )
# Get the valid email.
emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
# If not in the list then append it.
for email in emails:
valid_emails.add(email )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(valid_emails )
if __name__ == "__main__":
emails = emails_from_url('''https://github.com''')
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
| 0 | 1 |
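The crawler above hinges on HTMLParser's handle_starttag hook plus urljoin to resolve relative hrefs. A minimal sketch of just that link-collection step (the class and URL here are illustrative):

```python
from html.parser import HTMLParser
from urllib.parse import urljoin

class LinkCollector(HTMLParser):
    def __init__(self, base_url: str) -> None:
        super().__init__()
        self.base_url = base_url
        self.links: list[str] = []

    def handle_starttag(self, tag, attrs):
        if tag != "a":
            return
        for name, value in attrs:
            # Skip empty and fragment-only hrefs, resolve the rest against the base URL.
            if name == "href" and value not in (None, "", "#"):
                self.links.append(urljoin(self.base_url, value))

collector = LinkCollector("https://example.com/docs/")
collector.feed('<a href="page.html">p</a> <a href="#">top</a>')
print(collector.links)  # ['https://example.com/docs/page.html']
```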
from __future__ import annotations
def depth_first_search( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ) -> None:
row = len(possible_board )
# If row is equal to the size of the board it means there is a queen in each row of
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate over each column in the row to find all valid placements for that row
for col in range(n ):
# We apply what we learned previously. First we check that the current board
# (possible_board) does not already contain this column value, because a repeat
# would mean a vertical collision. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b, i.e. row - col = b
# 135º: y + x = b, i.e. row + col = b
#
# And we verify that the results of these two formulas do not already exist in
# their respective sets (diagonal_right_collisions, diagonal_left_collisions).
#
# If any of these checks is True it means there is a collision, so we continue
# to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If there is no collision, we call the function recursively with updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n : int ) -> None:
boards : list[list[str]] = []
depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
print(column )
print('' )
print(len(boards ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 282 |
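The diagonal test hinges on one invariant: queens at (r1, c1) and (r2, c2) share a diagonal exactly when r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2. A compact counting variant of the same search, checked against the known solution counts:

```python
def count_n_queens(n: int) -> int:
    solutions = 0

    def dfs(board: list[int], right: set[int], left: set[int]) -> None:
        nonlocal solutions
        row = len(board)
        if row == n:
            solutions += 1
            return
        for col in range(n):
            # Same three collision checks as above: column, 45º diagonal, 135º diagonal.
            if col in board or row - col in right or row + col in left:
                continue
            dfs(board + [col], right | {row - col}, left | {row + col})

    dfs([], set(), set())
    return solutions

# Known counts for n = 1..8: 1, 0, 0, 2, 10, 4, 40, 92
assert [count_n_queens(n) for n in range(1, 9)] == [1, 0, 0, 2, 10, 4, 40, 92]
```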
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[Any] = HfApi()
results = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
model = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!') | 282 | 1 |
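The script above is a numerical regression harness: fixed seeds, a fixed input, and torch.allclose against a stored slice of the output. A minimal sketch of the same pattern for any deterministic model; the Conv2d here is a stand-in for a real pretrained checkpoint, and in practice the expected slice would be hard-coded at test-writing time.

```python
import torch

def output_fingerprint(model: torch.nn.Module, n: int = 8) -> torch.Tensor:
    torch.manual_seed(0)  # fixed seed -> fixed input -> deterministic output
    x = torch.randn(1, 3, 8, 8)
    with torch.no_grad():
        return model(x).flatten()[:n].clone()

torch.manual_seed(0)  # seed the weights too, so the fingerprint is reproducible
model = torch.nn.Conv2d(3, 4, 3)
expected = output_fingerprint(model)  # stand-in for a hard-coded expected slice
assert torch.allclose(output_fingerprint(model), expected, atol=1e-3)
print("fingerprint matches")
```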
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int]=3_2, UpperCAmelCase__ : str=1_0, UpperCAmelCase__ : str=1_0_0, UpperCAmelCase__ : Optional[int]=1_0_2_6, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : Optional[int]="data/tokenized_stories_train_wikitext103.jbl", UpperCAmelCase__ : str="igf_context_pairs.jbl", ) ->Tuple:
set_seed(3 )
# generate train_data and objective_set
A__ , A__ : Any = generate_datasets(
UpperCAmelCase__, UpperCAmelCase__, number=UpperCAmelCase__, min_len=1_0_2_6, trim=UpperCAmelCase__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
A__ : str = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
A__ : Any = load_gpta("""gpt2""" ).to(UpperCAmelCase__ )
print("""computing perplexity on objective set""" )
A__ : str = compute_perplexity(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ).item()
print("""perplexity on objective set:""", UpperCAmelCase__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : int=1_5, UpperCAmelCase__ : List[Any]=1_2_8, UpperCAmelCase__ : Tuple=1_0_0, UpperCAmelCase__ : Union[str, Any]="igf_model.pt", ) ->Optional[int]:
set_seed(4_2 )
# Load pre-trained model
A__ : int = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
A__ : Union[str, Any] = SecondaryLearner(UpperCAmelCase__ )
# Train secondary learner
A__ : Dict = train_secondary_learner(
UpperCAmelCase__, UpperCAmelCase__, max_epochs=UpperCAmelCase__, batch_size=UpperCAmelCase__, eval_freq=1_0_0, igf_model_path=UpperCAmelCase__, )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : str=1_0_0_0, UpperCAmelCase__ : Optional[Any]=1_6, UpperCAmelCase__ : List[Any]=1.0, UpperCAmelCase__ : str=recopy_gpta, UpperCAmelCase__ : Optional[int]=None, UpperCAmelCase__ : Optional[int]=1_0, UpperCAmelCase__ : Dict="gpt2_finetuned.pt", ) ->Tuple:
A__ : int = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
A__ : Optional[Any] = RandomSampler(UpperCAmelCase__ )
A__ : Tuple = DataLoader(UpperCAmelCase__, sampler=UpperCAmelCase__ )
A__ : List[str] = max_steps // (len(UpperCAmelCase__ )) + 1
A__ : List[Any] = 0
A__ : Optional[Any] = torch.zeros((1, context_len), dtype=torch.long, device=UpperCAmelCase__ )
A__ , A__ , A__ : Any = recopy_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(UpperCAmelCase__ )
secondary_learner.eval()
A__ : Union[str, Any] = []
A__ : Optional[int] = 0
A__ : Union[str, Any] = []
A__ : Union[str, Any] = []
# Compute the performance of the transformer model at the beginning
A__ : Any = compute_perplexity(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
test_perps.append(UpperCAmelCase__ )
print("""Test perplexity, step""", UpperCAmelCase__, """:""", UpperCAmelCase__ )
for epoch in range(int(UpperCAmelCase__ ) ):
for step, example in enumerate(UpperCAmelCase__ ):
torch.cuda.empty_cache()
A__ : Optional[int] = random.randint(0, example.size(2 ) - context_len - 1 )
A__ : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
A__ : int = model(UpperCAmelCase__, labels=UpperCAmelCase__ )
A__ : Dict = True
if secondary_learner is not None:
A__ : Union[str, Any] = secondary_learner.forward(
torch.tensor(UpperCAmelCase__, dtype=torch.long, device=UpperCAmelCase__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(UpperCAmelCase__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
A__ : Optional[int] = -1
if predicted_q < threshold:
A__ : Optional[int] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
A__ : List[Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
A__ : int = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
A__ : Optional[int] = compute_perplexity(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
test_perps.append(UpperCAmelCase__ )
print("""Test perplexity, step""", UpperCAmelCase__, """:""", UpperCAmelCase__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict(), UpperCAmelCase__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _lowerCAmelCase ( ) ->Optional[Any]:
A__ : Dict = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""The input data dir. Should contain data files for WikiText.""", )
parser.add_argument(
"""--model_name_or_path""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""Path to pretrained model or model identifier from huggingface.co/models""", )
parser.add_argument(
"""--data_file""", type=UpperCAmelCase__, default=UpperCAmelCase__, help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
), )
parser.add_argument(
"""--igf_data_file""", type=UpperCAmelCase__, default=UpperCAmelCase__, help="""A jbl file containing the context and information gain pairs to train secondary learner.""", )
parser.add_argument(
"""--output_dir""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""The output directory where the final fine-tuned model is stored.""", )
parser.add_argument(
"""--tokenizer_name""", default=UpperCAmelCase__, type=UpperCAmelCase__, help="""Pretrained tokenizer name or path if not the same as model_name""", )
parser.add_argument("""--seed""", type=UpperCAmelCase__, default=UpperCAmelCase__, help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""", default=3_2, type=UpperCAmelCase__, help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
), )
parser.add_argument(
"""--size_objective_set""", default=1_0_0, type=UpperCAmelCase__, help="""number of articles that are long enough to be used as our objective set""", )
parser.add_argument(
"""--eval_freq""", default=1_0_0, type=UpperCAmelCase__, help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""", default=1_0_0_0, type=UpperCAmelCase__, help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""", default=1_2_8, type=UpperCAmelCase__, help="""batch size of training data for secondary learner""", )
parser.add_argument(
"""--batch_size""", default=1_6, type=UpperCAmelCase__, help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""", default=1_0, type=UpperCAmelCase__, help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
), )
parser.add_argument(
"""--number""", default=1_0_0, type=UpperCAmelCase__, help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""", default=1_0_2_6, type=UpperCAmelCase__, help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""", default=1_5, type=UpperCAmelCase__, help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""", default=UpperCAmelCase__, type=UpperCAmelCase__, help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""", default=1.0, type=UpperCAmelCase__, help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
), )
parser.add_argument("""--finetuned_model_name""", default="""gpt2_finetuned.pt""", type=UpperCAmelCase__, help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""", default=UpperCAmelCase__, type=UpperCAmelCase__, help="""Reset the model to the original pretrained GPT-2 weights after each iteration""", )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2, max_steps=1_0, size_objective_set=1_0_0, min_len=1_0_2_6, trim=UpperCAmelCase__, data_file="""data/tokenized_stories_train_wikitext103.jbl""", igf_data_file="""igf_context_pairs.jbl""", )
# Load train data for secondary learner
A__ : Tuple = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
A__ : Tuple = training_secondary_learner(
UpperCAmelCase__, secondary_learner_max_epochs=1_5, secondary_learner_batch_size=1_2_8, eval_freq=1_0_0, igf_model_path="""igf_model.pt""", )
# load pretrained gpt2 model
A__ : str = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
A__ , A__ : Union[str, Any] = generate_datasets(
context_len=3_2, file="""data/tokenized_stories_train_wikitext103.jbl""", number=1_0_0, min_len=1_0_2_6, trim=UpperCAmelCase__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, context_len=3_2, max_steps=1_0_0_0, batch_size=1_6, threshold=1.0, recopy_model=UpperCAmelCase__, secondary_learner=UpperCAmelCase__, eval_interval=1_0, finetuned_model_name="""gpt2_finetuned.pt""", )
if __name__ == "__main__":
main()
| 296 |
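The comment near step 10 above describes a selectivity schedule for the secondary learner's filter: from one standard deviation above the mean predicted IG down to one below. The script itself implements it as a hard switch (threshold set to -1 after 10 batches); a smoother linear version of the idea described in the comment might look like this sketch, with illustrative statistics:

```python
import statistics

def ig_threshold(observed: list[float], step: int, decay_steps: int = 10) -> float:
    """Linearly decay the cutoff from mean + 1*std to mean - 1*std over decay_steps batches."""
    mean = statistics.fmean(observed)
    std = statistics.pstdev(observed)
    frac = min(step / decay_steps, 1.0)     # 0.0 at the start, 1.0 after decay_steps
    return mean + (1.0 - 2.0 * frac) * std  # +1 std -> -1 std

scores = [0.2, 0.5, 0.9, 0.4, 0.7]
for step in (0, 5, 10):
    print(step, round(ig_threshold(scores, step), 3))
```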
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_['''tokenization_llama'''] = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_['''tokenization_llama_fast'''] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_['''modeling_llama'''] = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A_, module_spec=__spec__)
| 296 | 1 |
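The pattern above defers heavy imports until an attribute is first accessed. A plain module can get the same effect with PEP 562's module-level __getattr__; a minimal sketch, with illustrative module and class names:

```python
# mypkg/__init__.py - minimal lazy-import sketch (PEP 562)
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],  # only imported when MyModel is first touched
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```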
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Tuple ="umt5"
a : Optional[int] =["past_key_values"]
def __init__( self , vocab_size=250_112 , d_model=512 , d_kv=64 , d_ff=1_024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split("-" )
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == "gated"
if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase : Union[str, Any] = "gelu_new"
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.d_model
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.num_heads
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.num_layers
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase : Any = "past_encoder_sequence + sequence"
lowerCAmelCase : str = {0: "batch"}
lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase : Optional[int] = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowercase__ ( self ):
"""simple docstring"""
return 13
@property
def lowercase__ ( self ):
"""simple docstring"""
return 5e-4
| 108 |
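The feed_forward_proj handling above packs two facts into one string: an optional 'gated-' prefix and the activation name. A standalone sketch of that parse, including the 'gated-gelu' to 'gelu_new' remap:

```python
def parse_feed_forward_proj(value: str) -> tuple[str, bool]:
    """Return (dense_act_fn, is_gated) for strings like 'relu' or 'gated-gelu'."""
    parts = value.split("-")
    if len(parts) > 2 or (len(parts) == 2 and parts[0] != "gated"):
        raise ValueError(f"{value!r} must look like 'gated-{{ACT_FN}}' or '{{ACT_FN}}'")
    is_gated = parts[0] == "gated"
    act = parts[-1]
    if value == "gated-gelu":
        act = "gelu_new"  # historical alias used by T5-family configs
    return act, is_gated

print(parse_feed_forward_proj("gated-gelu"))  # ('gelu_new', True)
print(parse_feed_forward_proj("relu"))        # ('relu', False)
```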
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ) -> Tuple:
def decorator(func ):
handle = getattr(func ,"""handle_key""" ,[] )
handle += [key]
setattr(func ,"""handle_key""" ,handle )
return func
return decorator
def mark_multiple( *keys ) -> List[str]:
def decorator(func ):
handle = getattr(func ,"""handle_key""" ,[] )
handle += keys
setattr(func ,"""handle_key""" ,handle )
return func
return decorator
class KeyHandler (type ):
"""simple docstring"""
def __new__( cls , name , bases , attrs ) -> str:
new_cls = super().__new__(cls , name , bases , attrs )
if not hasattr(new_cls , """key_handler""" ):
setattr(new_cls , """key_handler""" , {} )
setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
handled_keys = getattr(value , """handle_key""" , [] )
for key in handled_keys:
new_cls.key_handler[key] = value
return new_cls
@staticmethod
def handle_input( cls ) -> List[str]:
char = get_character()
if char != KEYMAP["undefined"]:
char = ord(char )
handler = cls.key_handler.get(char )
if handler:
cls.current_selection = char
return handler(cls )
else:
return None
def register( cls ) -> Tuple:
return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
| 124 | 0 |
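How these pieces fit together: the decorator attaches a handle_key list to methods, and the metaclass collects those into a per-class key_handler dict at class-creation time. A usage sketch with the input source left out (names here are illustrative, not the library's):

```python
class KeyHandlerDemo(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        new_cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                new_cls.key_handler[key] = value
        return new_cls

def on_key(key):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class Menu(metaclass=KeyHandlerDemo):
    @on_key(ord("q"))
    def quit(self):
        return "quit"

menu = Menu()
handler = Menu.key_handler[ord("q")]  # dispatch by key code
print(handler(menu))  # 'quit'
```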
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
def lowercase_ ( self : Any ):
'''simple docstring'''
sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloat16 , )
prompt = '''A painting of a squirrel eating a burger'''
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt )
params = replicate(params )
prompt_ids = shard(prompt_ids )
prng_seed = jax.random.PRNGKey(0 )
prng_seed = jax.random.split(prng_seed , jax.device_count() )
images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
expected_slice = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowercase_ ( self : str ):
'''simple docstring'''
model_id = '''stabilityai/stable-diffusion-2'''
scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
model_id , scheduler=scheduler , revision='''bf16''' , dtype=jnp.bfloat16 , )
params['''scheduler'''] = scheduler_params
prompt = '''A painting of a squirrel eating a burger'''
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt )
params = replicate(params )
prompt_ids = shard(prompt_ids )
prng_seed = jax.random.PRNGKey(0 )
prng_seed = jax.random.split(prng_seed , jax.device_count() )
images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
expected_slice = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 371 |
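Both tests use the standard Flax data-parallel recipe: replicate the parameters across devices, shard the per-sample inputs, then run a pmapped function. A toy version of just that replicate/shard step; any pure function stands in for the pipeline here.

```python
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n_dev = jax.device_count()
params = {"w": jnp.ones((3,))}  # one copy -> leading device axis after replicate
inputs = jnp.arange(4 * n_dev * 3, dtype=jnp.float32).reshape(4 * n_dev, 3)

p_params = replicate(params)    # w becomes (n_dev, 3)
p_inputs = shard(inputs)        # (n_dev, 4, 3): batch split across devices

@jax.pmap
def apply(params, x):
    return x @ params["w"]      # each device applies its own parameter copy

print(apply(p_params, p_inputs).shape)  # (n_dev, 4)
```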
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCamelCase__ = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = BlenderbotSmallTokenizer
def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , )
UpperCAmelCase__ : List[Any] = add_prefix_space
def lowercase_ ( self : str , _A : Any , _A : Any=None ):
'''simple docstring'''
output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 299 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Tuple = 'vit_mae'
def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.7_5 , norm_pix_loss=False , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.decoder_num_attention_heads = decoder_num_attention_heads
self.decoder_hidden_size = decoder_hidden_size
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.decoder_intermediate_size = decoder_intermediate_size
self.mask_ratio = mask_ratio
self.norm_pix_loss = norm_pix_loss
| 24 |
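mask_ratio=0.75 above means the encoder sees only a quarter of the patch tokens; the rest are dropped and later reconstructed by the decoder. A sketch of the per-sample random patch masking that this ratio controls, following the usual argsort-of-noise construction:

```python
import torch

def random_patch_mask(batch: int, num_patches: int, mask_ratio: float = 0.75):
    len_keep = int(num_patches * (1 - mask_ratio))
    noise = torch.rand(batch, num_patches)        # per-patch random scores
    ids_shuffle = torch.argsort(noise, dim=1)     # lowest scores are kept
    ids_keep = ids_shuffle[:, :len_keep]
    mask = torch.ones(batch, num_patches)
    mask.scatter_(1, ids_keep, 0.0)               # 0 = visible, 1 = masked
    return ids_keep, mask

ids_keep, mask = random_patch_mask(2, 16)
print(ids_keep.shape, mask.sum(dim=1))  # torch.Size([2, 4]) tensor([12., 12.])
```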
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
accelerator = Accelerator()
shape = (accelerator.state.process_index + 2, 10)
tensor = torch.randint(0, 10, shape).to(accelerator.device)
error_msg = ""
tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 311 | 0 |
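The __main__ block above checks pad_across_processes: each rank holds a tensor of a different length, and all are zero-padded to the longest before gathering. A single-process sketch of the padding logic being verified (pure torch, no distributed setup, helper name is illustrative):

```python
import torch

def pad_to_common_length(tensors: list[torch.Tensor], pad_first: bool = False) -> list[torch.Tensor]:
    max_len = max(t.shape[0] for t in tensors)
    padded = []
    for t in tensors:
        out = t.new_zeros((max_len,) + t.shape[1:])  # pad value is 0, as the test asserts
        if pad_first:
            out[max_len - t.shape[0]:] = t           # padding goes in front
        else:
            out[: t.shape[0]] = t                    # padding goes at the end
        padded.append(out)
    return padded

a, b = torch.tensor([1, 2]), torch.tensor([3, 4, 5])
print(pad_to_common_length([a, b]))                  # [tensor([1, 2, 0]), tensor([3, 4, 5])]
print(pad_to_common_length([a, b], pad_first=True))  # [tensor([0, 1, 2]), tensor([3, 4, 5])]
```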
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( lowercase__ ):
'''simple docstring'''
__UpperCamelCase = """vit"""
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
class __magic_name__ ( lowercase__ ):
'''simple docstring'''
__UpperCamelCase = version.parse("1.11" )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return 1e-4
| 351 |
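The defaults above pin down the model's sequence geometry: a 224x224 image cut into 16x16 patches yields (224/16)^2 = 196 patch tokens, plus one [CLS] token, each projected to hidden_size 768. A two-line check of that arithmetic:

```python
image_size, patch_size, hidden_size = 224, 16, 768
num_patches = (image_size // patch_size) ** 2
print(num_patches, num_patches + 1)        # 196 patch tokens, 197 with the [CLS] token
# The patch embedding maps 3 * 16 * 16 = 768 pixel values per patch to hidden_size.
print(3 * patch_size * patch_size, hidden_size)
```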
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : List[str] = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
@dataclass(frozen=UpperCAmelCase__ )
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
def __init__( self , _a , _a , _a , _a = None , _a=False , _a = False , ):
"""simple docstring"""
lowerCamelCase = hans_processors[task]()
lowerCamelCase = os.path.join(
_a , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_a ) , _a , ) , )
lowerCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase , lowerCamelCase = label_list[2], label_list[1]
lowerCamelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase = cached_features_file + """.lock"""
with FileLock(_a ):
if os.path.exists(_a ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
lowerCamelCase = torch.load(_a )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
lowerCamelCase = (
processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a )
)
logger.info("""Training examples: %s""" , len(_a ) )
lowerCamelCase = hans_convert_examples_to_features(_a , _a , _a , _a )
logger.info("""Saving features into cached file %s""" , _a )
torch.save(self.features , _a )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _a ):
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
def __init__( self , _a , _a , _a , _a = 128 , _a=False , _a = False , ):
"""simple docstring"""
lowerCamelCase = hans_processors[task]()
lowerCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase , lowerCamelCase = label_list[2], label_list[1]
lowerCamelCase = label_list
lowerCamelCase = processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a )
lowerCamelCase = hans_convert_examples_to_features(_a , _a , _a , _a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(_a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase = tf.data.Dataset.from_generator(
_a , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _a ):
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.label_list
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_train_set.txt""" ) ) , """train""" )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = []
for i, line in enumerate(_a ):
if i == 0:
continue
lowerCamelCase = """%s-%s""" % (set_type, line[0])
lowerCamelCase = line[5]
lowerCamelCase = line[6]
lowerCamelCase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
lowerCamelCase = line[0]
examples.append(InputExample(guid=_a , text_a=_a , text_b=_a , label=_a , pairID=_a ) )
return examples
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Tuple:
lowerCamelCase = {label: i for i, label in enumerate(snake_case__ )}
lowerCamelCase = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case__ ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d""" % (ex_index) )
lowerCamelCase = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case__ , max_length=snake_case__ , padding="""max_length""" , truncation=snake_case__ , return_overflowing_tokens=snake_case__ , )
lowerCamelCase = label_map[example.label] if example.label in label_map else 0
lowerCamelCase = int(example.pairID )
features.append(InputFeatures(**snake_case__ , label=snake_case__ , pairID=snake_case__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
lowerCAmelCase : List[str] = {
"""hans""": 3,
}
lowerCAmelCase : str = {
"""hans""": HansProcessor,
}
| 168 | 0 |
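The HACK comments above swap label indices because some pretrained checkpoints were trained with a ('contradiction', 'neutral', 'entailment') ordering while the processor emits ('contradiction', 'entailment', 'neutral'). A tiny sketch of that remap:

```python
labels = ["contradiction", "entailment", "neutral"]
# Swap positions 1 and 2 so indices line up with the checkpoint's classification head.
labels[2], labels[1] = labels[1], labels[2]
label_map = {label: i for i, label in enumerate(labels)}
print(label_map)  # {'contradiction': 0, 'neutral': 1, 'entailment': 2}
```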
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowerCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''ernie_m'''
__magic_name__ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : List[str] , lowerCAmelCase_ : int = 250_002 , lowerCAmelCase_ : int = 768 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 3_072 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 514 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : float = 1e-05 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[int]=0.0 , **lowerCAmelCase_ : Tuple , ) -> List[str]:
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Tuple = classifier_dropout
UpperCAmelCase_ : Union[str, Any] = is_decoder
UpperCAmelCase_ : Optional[Any] = act_dropout
| 268 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCamelCase_ = None
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = MBartTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = vocab_file
UpperCAmelCase_ : str = False if not self.vocab_file else True
UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
UpperCAmelCase_ : Tuple = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX"
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase_ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None:
UpperCAmelCase_ : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : List[str] = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 268 | 1 |
"""simple docstring"""
def snake_case ( A__ ):
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase_ : Union[str, Any] = len(bin(A__ )[3:] )
UpperCAmelCase_ : Union[str, Any] = bin(abs(A__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ : List[Any] = (
(
"1"
+ "0" * (binary_number_length - len(A__ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True,
        crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
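    # Note: only the shortest edge is pinned to size["shortest_edge"]; the other
    # dimension scales to preserve the aspect ratio (default_to_square=False).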
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None,
        resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
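    # The return value is a list of 2D label maps (argmax over the class dimension),
    # one per image, resized to target_sizes[i] when target sizes are provided.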
| 253 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( _snake_case : float , _snake_case : int = 18 , _snake_case : int = 10 ) ->float:
"""simple docstring"""
__snake_case : Any = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__snake_case : int = radians(_snake_case )
__snake_case : str = angle_in_radians
__snake_case : Optional[int] = 3
__snake_case : List[Any] = -1
for _ in range(_snake_case ):
result += (b * (angle_in_radians**a)) / factorial(_snake_case )
__snake_case : int = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_snake_case , _snake_case )
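
# Sanity checks (standard values): sin(0) == 0.0, sin(90) ~= 1.0, sin(30) ~= 0.5.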
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 102 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
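
# For the "cosine" transform above, alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2,
# so the cumulative product of (1 - beta_i) approximately traces that curve; each beta is
# capped at max_beta (0.999 by default) to avoid a singularity as t -> 1.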
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,
        beta_schedule: str = "linear", trained_betas=None, prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace", steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
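    # sigma_to_t inverts the (log) sigma schedule: it finds the two surrounding
    # log-sigmas and linearly interpolates their indices, yielding fractional
    # timesteps for the interpolated "midpoint" sigmas used by the 2nd-order step.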
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict: bool = True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
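    # The scheduler alternates between a 1st-order step (state_in_first_order, where
    # `self.sample` is stashed) and a 2nd-order DPM-Solver-2 correction that reuses the
    # stashed sample; this is why set_timesteps interleaves sigmas and sigmas_interpol.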
    def add_noise(self, original_samples, noise, timesteps):
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 355 |
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
i += 1
def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    return len(find_circular_primes())
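
# Example: 197 is a circular prime because every rotation of its digits
# (197, 971, 719) is itself prime.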
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 250 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )
    return selected_warnings
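
# Matching example: with targets = ["DeprecationWarning"], a buffered warning block is
# kept only if its joined text contains the substring ": DeprecationWarning: ".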
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
 | 8 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 8 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
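    # Resulting formats: `[CLS] A [SEP]` for a single sequence and
    # `[CLS] A [SEP] B [SEP]` for a pair of sequences.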
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 168 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCamelCase = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCamelCase = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""clip-base""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , revision="""aaaaaa""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self ):
"""simple docstring"""
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = True
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 168 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
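
# A minimal usage sketch (assumes the transformers tools runtime, which makes
# PipelineTool instances callable and chains encode -> forward -> decode):
#   summarizer = TextSummarizationTool()
#   summary = summarizer("Some long English text ...")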
| 120 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray_img = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray_img, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray_img, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray_img, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray_img)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 185 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True,
        size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True,
        rescale_factor=1 / 255, do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after resizing with a scalar shortest_edge.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
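    # Example with shortest_edge=18: an image with (w, h) = (30, 40) yields
    # (expected_height, expected_width) = (24, 18); with (40, 30) it yields (18, 24).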
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))

        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))

        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))

        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))

        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))

        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))

        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))

        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))

        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)

        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 185 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """Constructs a GPT-SW3 tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # NOTE: this set should hold the Unicode space variants plus zero-width
        # characters; some glyphs may have been lost in transcription, which is why
        # the last two entries render as empty strings here.

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        """
        Returns the preprocessed text, using the same procedure that was applied when training the tokenizer.
        """
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string unchanged, overriding the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SentencePiece tokenizer.
        This has reduced functionality but is often much faster.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """
        Decodes a text or batch of texts from token ids using the raw SentencePiece tokenizer.
        This has reduced functionality but is often much faster.
        """
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the prompt for a conversation, alternating `User:`/`Bot:` turns and ending with `Bot:`."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
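# A minimal usage sketch (the path "spiece.model" is illustrative; any trained
# SentencePiece model for GPT-SW3 works):
#
#   tokenizer = GPTSw3Tokenizer("spiece.model")
#   ids = tokenizer.encode_fast("Hej! Hur mår du?", return_tensors="pt")
#   print(tokenizer.decode_fast(ids.tolist()))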
| 0 |
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor just "#", record it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    """Return the main domain of a URL (its last two dot-separated labels)."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    """Return the full network location of a URL."""
    return parse.urlparse(url).netloc
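# For illustration (values derived from the helpers above, not from a live run):
# get_sub_domain_name("https://docs.github.com/en") returns "docs.github.com", and
# get_domain_name keeps only the last two labels, giving "github.com".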
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the links on a page and collect e-mail addresses on the same domain."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 0 | 1 |
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
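# fill_mask returns a list of (filled_sentence, probability, predicted_token) tuples,
# ordered from most to least probable. Illustrative output for the prompt below
# (values are indicative, not from a verified run):
#   [("Le camembert est délicieux :)", 0.49, "délicieux"), ...]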
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 351 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
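    # Note: wrapping the BertConfig dict in DPRConfig works because each DPR encoder is
    # configured as a BERT backbone plus an optional projection head; with the default
    # projection_dim=0 used here, the pooler output keeps the backbone hidden size
    # (hence the `self.projection_dim or self.hidden_size` shape checks below).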
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 125 | 0 |