| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
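# Minimal usage sketch (illustrative only; the custom column names below are assumed):
# >>> template = AutomaticSpeechRecognition(audio_column="speech", transcription_column="text")
# >>> template.column_mapping
# {'speech': 'audio', 'text': 'transcription'}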
| 51
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Extracts padded log mel spectrograms and patch attention masks from raw audio."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a log mel spectrogram and rescale it to roughly the [-1, 1] range."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
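# Illustrative usage sketch (inputs are assumed, not from the original file):
# >>> import numpy as np
# >>> extractor = TvltFeatureExtractor()
# >>> waveform = np.zeros(44100, dtype=np.float32)  # one second of silence at 44.1 kHz
# >>> out = extractor(waveform, sampling_rate=44100, return_tensors="np")
# >>> sorted(out.keys())
# ['audio_mask', 'audio_values']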
| 126
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 352
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over every cell; exponential without memoization."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table, sweeping bottom-right to top-left."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only two rows of the table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: aliasing would make `diagonal` read values already
        # overwritten in the current row
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
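# Worked examples (added for illustration): the functions return the side length of
# the largest square of 1s. A 2x2 all-ones matrix gives 2; zeroing one corner caps it at 1.
# >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
# 2
# >>> largest_square_area_in_matrix_bottom_up(2, 2, [[0, 1], [1, 1]])
# 1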
| 287
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Compute whichever of voltage, current, or power is given as 0 from the other two (P = V * I)."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
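# Worked example (added for illustration): with voltage marked unknown (0),
# P = V * I gives V = P / I = 5 / 2 = 2.5.
# >>> electric_power(voltage=0, current=2, power=5)
# result(name='voltage', value=2.5)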
| 228
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
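# Usage note (illustrative): with the _LazyModule registration above, an import such as
#     from transformers.models.jukebox import JukeboxConfig
# resolves the configuration submodule only on first attribute access, which keeps
# `import transformers` itself cheap.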
| 228
| 1
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
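# Example invocation (illustrative; the script name and paths below are assumed):
#   python convert_switch_transformers_checkpoint.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --gin_file /path/to/config.gin \
#     --pytorch_dump_folder_path ./switch-base-8 \
#     --num_experts 8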
| 362
|
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
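# Illustrative usage sketch (the model id and audio file below are assumptions):
# >>> from transformers import pipeline
# >>> classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# >>> classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# returns a list of {"score", "label"} dicts sorted by descending score.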
| 129
| 0
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def __lowercase ( ):
UpperCamelCase_ : List[Any] = 10
UpperCamelCase_ : Optional[Any] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCamelCase_ : str = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] ):
UpperCamelCase_ : List[str] = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
a_ = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : List[Any] ):
UpperCamelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCamelCase_ : List[str] = FILE_CONTENT
with open(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[Any] ):
import bz2
UpperCamelCase_ : str = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCamelCase_ : Union[str, Any] = bytes(__UpperCamelCase , 'utf-8' )
with bz2.open(__UpperCamelCase , 'wb' ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[int] ):
import gzip
UpperCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCamelCase_ : List[str] = bytes(__UpperCamelCase , 'utf-8' )
with gzip.open(__UpperCamelCase , 'wb' ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Tuple ):
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
UpperCamelCase_ : str = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCamelCase_ : List[str] = bytes(__UpperCamelCase , 'utf-8' )
with lz4.frame.open(__UpperCamelCase , 'wb' ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] ):
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
UpperCamelCase_ : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with py7zr.SevenZipFile(__UpperCamelCase , 'w' ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict ):
import tarfile
UpperCamelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__UpperCamelCase , 'w' ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Any ):
import lzma
UpperCamelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCamelCase_ : int = bytes(__UpperCamelCase , 'utf-8' )
with lzma.open(__UpperCamelCase , 'wb' ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : str ):
import zipfile
UpperCamelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Any ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCamelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCamelCase_ : int = bytes(__UpperCamelCase , 'utf-8' )
with zstd.open(__UpperCamelCase , 'wb' ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Dict ):
UpperCamelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCamelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase )
return filename
a_ = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
a_ = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
a_ = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
a_ = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
a_ = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def __lowercase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Any ):
UpperCamelCase_ : List[str] = datasets.Dataset.from_dict(__UpperCamelCase )
UpperCamelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Tuple ):
UpperCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlite3.connect(__UpperCamelCase ) ) as con:
UpperCamelCase_ : int = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : str ):
UpperCamelCase_ : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__UpperCamelCase , 'w' , newline='' ) as f:
UpperCamelCase_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : List[Any] ):
UpperCamelCase_ : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__UpperCamelCase , 'w' , newline='' ) as f:
UpperCamelCase_ : int = csv.DictWriter(__UpperCamelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : List[Any] , lowerCamelCase : str ):
import bz2
UpperCamelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__UpperCamelCase , 'rb' ) as f:
UpperCamelCase_ : Optional[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(__UpperCamelCase , 'wb' ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] ):
UpperCamelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int ):
UpperCamelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ):
UpperCamelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Dict ):
UpperCamelCase_ : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCamelCase_ : Optional[Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.int64(),
'col_3': pa.float64(),
} )
with open(__UpperCamelCase , 'wb' ) as f:
UpperCamelCase_ : Optional[Any] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
UpperCamelCase_ : int = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCamelCase_ : Dict = {'data': DATA}
with open(__UpperCamelCase , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Union[str, Any] ):
UpperCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCamelCase_ : int = {'data': DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[int] ):
UpperCamelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__UpperCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Union[str, Any] ):
UpperCamelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__UpperCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__UpperCamelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : str ):
UpperCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__UpperCamelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Optional[Any] ):
import gzip
UpperCamelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__UpperCamelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : List[Any] ):
import gzip
UpperCamelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__UpperCamelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Dict , lowerCamelCase : int ):
UpperCamelCase_ : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Any ):
UpperCamelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Optional[Any] ):
UpperCamelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Dict ):
UpperCamelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__UpperCamelCase , 'w' ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
UpperCamelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__UpperCamelCase , 'w' ) as f:
f.add(__UpperCamelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Tuple ):
UpperCamelCase_ : Union[str, Any] = ['0', '1', '2', '3']
UpperCamelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__UpperCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[Any] ):
UpperCamelCase_ : Tuple = ['0', '1', '2', '3']
UpperCamelCase_ : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__UpperCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Tuple ):
UpperCamelCase_ : List[str] = ['0', '1', '2', '3']
UpperCamelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__UpperCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] ):
UpperCamelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : List[str] ):
UpperCamelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] ):
UpperCamelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(__UpperCamelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : List[Any] ):
UpperCamelCase_ : int = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope='session' )
def __lowercase ( ):
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def __lowercase ( ):
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : int , lowerCamelCase : Tuple ):
UpperCamelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__UpperCamelCase , 'w' ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def __lowercase ( lowerCamelCase : Optional[Any] ):
UpperCamelCase_ : List[str] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 175
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 266
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights into our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
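# Example invocation (illustrative; the script name and output path are assumed):
#   python convert_deit_timm_to_pytorch.py \
#     --deit_name vit_deit_base_distilled_patch16_224 \
#     --pytorch_dump_folder_path ./deit-base-distilled-patch16-224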
| 353
|
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
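# Expected output for the graph above (added for illustration): the post-order DFS
# from 'a' appends each vertex only after its children, giving
# ['c', 'd', 'e', 'b', 'a'].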
| 29
| 0
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Return x unchanged if it is already iterable, otherwise duplicate it into a 2-tuple."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
def lowercase ( self : str , snake_case_ : int , snake_case_ : int ):
pass
def lowercase ( self : Optional[Any] ):
pass
def lowercase ( self : int ):
pass
def lowercase ( self : List[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Any , snake_case_ : int , snake_case_ : List[str]=None , **snake_case_ : int ):
_UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase ( self : List[str] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple=None , **snake_case_ : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Union[str, Any]=None , **snake_case_ : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = {"vision_model": vision_model, "text_model": text_model}
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any]=None , **snake_case_ : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
        out_1 = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
def lowercase ( self : int , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int=None , **snake_case_ : str ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
_UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = to_atuple(vision_model.config.image_size )
_UpperCAmelCase = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
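    # Worked example for the seq_len arithmetic above (illustrative numbers, not
    # from this test): a 224x224 image with 16x16 patches yields
    # (224 // 16) * (224 // 16) = 196 patches, so seq_len = 196 + 1 = 197.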
def lowercase ( self : Optional[int] , snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : float ):
_UpperCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(snake_case_ , snake_case_ , f'Difference between torch and flax is {diff} (>= {tol}).' )
def lowercase ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case_ )
@slow
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.get_pretrained_model_and_inputs()
_UpperCAmelCase = model_a(**snake_case_ )
        out_1 = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ )
_UpperCAmelCase = model_a(**snake_case_ )
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
def lowercase ( self : int , snake_case_ : int , snake_case_ : Dict , snake_case_ : str , snake_case_ : Tuple , snake_case_ : str=None , **snake_case_ : Tuple ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
_UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase = to_atuple(vision_model.config.image_size )
_UpperCAmelCase = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
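    # Same arithmetic as the ViT case, but DeiT prepends both a [CLS] and a
    # distillation token: with 224x224 images and 16x16 patches,
    # seq_len = 196 + 2 = 198 (illustrative numbers, not from this test).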
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def lowercase ( self : int ):
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=snake_case_ )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=snake_case_ , padding=snake_case_ , return_tensors="np" )
_UpperCAmelCase = model(**snake_case_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCAmelCase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case_ , atol=1e-3 ) )
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = TFDistilBertModel(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
__UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFDistilBertForMultipleChoice(lowercase )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForTokenClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 )
def __lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def __lowerCamelCase ( self ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[str] = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Union[str, Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
lowerCamelCase : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
lowerCamelCase : List[Any] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Union[str, Any] = self.num_labels
lowerCamelCase : Any = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[str] = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
return True
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = LiltModelTester(self )
lowerCamelCase : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(__magic_name__ )
lowerCamelCase : Optional[Any] = torch.tensor([[1, 2]] , device=__magic_name__ )
lowerCamelCase : Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : int = model(input_ids=__magic_name__ , bbox=__magic_name__ )
lowerCamelCase : Union[str, Any] = torch.Size([1, 2, 7_6_8] )
lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__magic_name__ , )
self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1e-3 ) )
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the distinct paths from the top-left to the bottom-right corner of the grid."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
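# Worked example (an illustrative grid, not from the source): on an open 2x2
# grid there are exactly two paths from the top-left to the bottom-right
# corner, so
#
#     depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2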
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Optional[int] , **UpperCamelCase: List[str] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
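    # Note on the shapes above: post_process_masks upsamples the low-resolution
    # (1, 3, 5, 5) mask logits back to the original image size recorded during
    # preprocessing, which is why (1, 3, 1764, 2646) is the expected output shape.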
@require_vision
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Optional[int] , **UpperCamelCase: str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = [tf.ones((1, 3, 5, 5) )]
A__ = [[17_64, 26_46]]
A__ = [[6_83, 10_24]]
A__ = processor.post_process_masks(UpperCamelCase , UpperCamelCase , UpperCamelCase , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = processor.post_process_masks(
UpperCamelCase , tf.convert_to_tensor(UpperCamelCase ) , tf.convert_to_tensor(UpperCamelCase ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(
UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A__ = processor.post_process_masks(
UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Tuple , **UpperCamelCase: Tuple ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)

        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
A__ = processor(images=UpperCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
A__ = image_processor(UpperCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
A__ = processor(images=UpperCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
| 69
| 1
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a string of roman numerals to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman numeral string for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Compute the total character savings from rewriting the numerals in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
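# Worked example (traced by hand, not from the source file):
# parse_roman_numerals("XIIIIII") returns 16 and generate_roman_numerals(16)
# returns "XVI", so rewriting that line saves 7 - 3 = 4 characters.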
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> & new answer tokens as per new context."""
    answer = _get_single_answer(example)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
        doc = example["document"]["tokens"]
        context = []
for i in range(len(doc['''token'''])):
if not doc["is_html"][i]:
context.append(doc['''token'''][i])
return {
"context": " ".join(lowerCamelCase_),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
for i in range(len(doc['''token'''])):
if not doc["is_html"][i]:
context.append(doc['''token'''][i])
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
    new = " ".join(context[start_token:end_token])
# checking above code
if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
return {
"context": " ".join(lowerCamelCase_),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
inputs.append(q_indices + slice)
category.append(answer['''category'''][0])
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase_),
"end_token": [-100] * len(lowerCamelCase_),
"category": category,
},
}
lowerCAmelCase__ : Optional[Any] = out['''context'''].split()
lowerCAmelCase__ : Union[str, Any] = splitted_context[answer['''end_token''']]
lowerCAmelCase__ : Optional[int] = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']]) ,add_special_tokens=lowerCamelCase_ ,).input_ids)
lowerCAmelCase__ : Dict = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']]) ,add_special_tokens=lowerCamelCase_).input_ids)
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowerCAmelCase__ : int = len(tokenizer(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_).input_ids)
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowerCAmelCase__ : Union[str, Any] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
lowerCAmelCase__ : List[str] = answer['''start_token''']
lowerCAmelCase__ : Union[str, Any] = answer['''end_token''']
if assertion:
lowerCAmelCase__ : int = tokenizer.decode(lowerCamelCase_)
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''')
print('''OLD:''' ,answer['''span'''])
print('''NEW:''' ,lowerCamelCase_ ,end='''\n\n''')
if len(lowerCamelCase_) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowerCAmelCase__ : int = input_ids[:q_len]
lowerCAmelCase__ : Optional[Any] = range(lowerCamelCase_ ,len(lowerCamelCase_) ,max_length - doc_stride)
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Any = [] # null, yes, no, long, short
for i in doc_start_indices:
lowerCAmelCase__ : str = i + max_length - q_len
lowerCAmelCase__ : List[str] = input_ids[i:end_index]
inputs.append(q_indices + slice)
assert len(inputs[-1]) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowerCAmelCase__ : int = start_token - i + q_len
lowerCAmelCase__ : str = end_token - i + q_len
answers_category.append(answer['''category'''][0]) # ["short"] -> "short"
else:
lowerCAmelCase__ : Tuple = -100
lowerCAmelCase__ : List[str] = -100
answers_category.append('''null''')
lowerCAmelCase__ : int = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase_)
answers_end_token.append(lowerCamelCase_)
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' ,example['''id'''])
print('''New:''' ,tokenizer.decode(lowerCamelCase_))
print('''Old:''' ,tokenizer.decode(lowerCamelCase_) ,end='''\n\n''')
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
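# Illustrative sketch, not part of the original script: how the stride arithmetic above
# windows a long token sequence; every window re-prepends the question tokens.
def _demo_doc_windows(total_len=10_000, q_len=16, max_length=4096, doc_stride=2048):
    doc_start_indices = range(q_len, total_len, max_length - doc_stride)
    return [(i, i + max_length - q_len) for i in doc_start_indices][:3]  # first three (start, end) slices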
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
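# Illustrative sketch, not part of the original script: streaming the jsonl written by
# `save_to_disk` back for training; the field names mirror what is written above.
def _demo_read_back(file_name="nq-training.jsonl"):
    with jsonlines.open(file_name) as reader:
        for sample in reader:
            # one line per stride: token ids plus the inclusive answer span and category id
            yield sample["input_ids"], sample["start_token"], sample["end_token"], sample["category"]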
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__snake_case : Optional[int] =load_dataset('natural_questions')
__snake_case : Union[str, Any] =BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
__snake_case : Tuple =data['train' if PROCESS_TRAIN == 'true' else 'validation']
__snake_case : Optional[int] ={
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
__snake_case : Dict =data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__snake_case : Dict =data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
__snake_case : int ='nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    """Audio `Feature` to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode example into a format for Arrow."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        """Decode example audio file into audio data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
def SCREAMING_SNAKE_CASE__ ( self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.')
return {
"bytes": Value('binary'),
"path": Value('string'),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed audio files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
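# Illustrative sketch, not part of the feature module: an encode/decode round trip on a
# synthetic sine wave, assuming `soundfile` and `librosa` are installed.
def _demo_audio_roundtrip():
    sampling_rate = 16_000
    waveform = np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate).astype(np.float32)
    feature = Audio(sampling_rate=sampling_rate)
    encoded = feature.encode_example({"array": waveform, "sampling_rate": sampling_rate})  # wav bytes + null path
    decoded = feature.decode_example(encoded)  # {"path": None, "array": ..., "sampling_rate": 16000}
    return decoded["sampling_rate"], decoded["array"].shape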
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear module with a low-rank adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
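    # Illustrative sketch, not from the original test file: wrapping a plain linear layer
    # with the LoRALayer above; only the two adapter matrices would receive gradients.
    def _demo_lora_wrap(in_features=8, out_features=8, rank=4):
        base = nn.Linear(in_features, out_features)
        wrapped = LoRALayer(base, rank=rank)
        x = torch.randn(2, in_features)
        return wrapped(x).shape  # -> torch.Size([2, out_features])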
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_raise_on_save_pretrained(self):
        # saving a 4-bit model is expected to raise (NotImplementedError assumed here)
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)
    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting to fp32
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries casting to fp16
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        # Test mixing `4bit` and `fp32` weights via `_keep_in_fp32_modules`
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        # Load a model on 2 GPUs and check that inference works
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
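# Illustrative sketch, not part of the test suite: the minimal 4-bit loading call the
# tests above exercise; assumes a CUDA device and `bitsandbytes` are available.
def _demo_load_4bit(model_name="facebook/opt-350m"):
    quantization_config = BitsAndBytesConfig(load_in_4bit=True)
    return AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config, device_map="auto")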
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
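# Illustrative sketch, not part of the original script: the expected ref-file layout is one
# JSON list per line, aligned 1:1 with the dataset rows (sub-token indices per whole word).
def _demo_write_ref_file(path, refs):
    # e.g. refs = [[2, 3], [], [1]]
    with open(path, "w", encoding="utf-8") as f:
        for ref in refs:
            f.write(json.dumps(ref) + "\n")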
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu},"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid them being removed by the trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
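# Illustrative usage sketch, not part of the script: a typical launch, assuming a local
# text corpus and a ref file produced separately for Chinese whole word masking.
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --do_train \
#       --output_dir ./out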
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
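# Illustrative sketch, not part of the original tests: one plain fit with the helpers above,
# without `Accelerator`, just to show what `train` consumes.
def _demo_plain_fit():
    set_seed(42)
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    train_dataloader, _ = dummy_dataloaders()
    for x, y in train_dataloader:
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    return model.a.item(), model.b.item()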
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states: with total_limit=2 only the two most recent should remain
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
"""Text/audio processor class for MusicGen."""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
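# --- Hedged usage sketch (added; not from the original file) -----------------
# Pure-numpy illustration of the padding trick in `_decode_audio` above: the
# mask is padded with the *non-padding* value so that samples generated past
# the original mask length are kept rather than stripped. All names are local.
if __name__ == "__main__":
    padding_value = 0.0
    audio = np.arange(12, dtype=np.float32).reshape(1, 1, 12)  # (batch, channels, seq_len)
    mask = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])  # last 2 of 10 steps were padding

    difference = audio.shape[-1] - mask.shape[-1]
    mask = np.pad(mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)

    sliced = np.asarray(audio[0])[mask[0][None, :] != padding_value].reshape(1, -1)
    print(sliced.shape)  # (1, 10): 8 unmasked original steps + 2 newly generated ones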
| 370
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
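# --- Hedged sketch (added; NOT diffusers' actual implementation) -------------
# A simplified re-statement of the rule these tests pin down: every torch
# `.bin` weight file must have a safetensors sibling ("pytorch_model" maps to
# "model" for transformers components). The exact-sibling rule already covers
# the `variant` cases above, so `variant` is accepted but unused here.
def _is_safetensors_compatible_sketch(filenames, variant=None):
    safetensors_files = {f for f in filenames if f.endswith(".safetensors")}
    for pt_file in (f for f in filenames if f.endswith(".bin")):
        expected = pt_file.replace(".bin", ".safetensors")
        if "diffusion_pytorch_model" not in expected:
            expected = expected.replace("pytorch_model", "model")
        if expected not in safetensors_files:
            return False
    return True


if __name__ == "__main__":
    assert _is_safetensors_compatible_sketch(
        ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
    )
    assert not _is_safetensors_compatible_sketch(["unet/diffusion_pytorch_model.bin"])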
| 287
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
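# --- Hedged sketch (added; not the transformers implementation) --------------
# A from-scratch greedy longest-match-first WordPiece pass over the toy vocab
# from setUp(), showing why "unwanted" splits into un ##want ##ed.
def _wordpiece_sketch(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        piece = None
        while start < end:
            candidate = word[start:end]
            if start > 0:
                candidate = "##" + candidate
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]
        tokens.append(piece)
        start = end
    return tokens


if __name__ == "__main__":
    toy_vocab = {"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"}
    assert _wordpiece_sketch("unwanted", toy_vocab) == ["un", "##want", "##ed"]
    assert _wordpiece_sketch("running", toy_vocab) == ["runn", "##ing"]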
| 341
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: older checkpoints store pos_att_type as a string
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor, batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
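# --- Hedged micro-example (added) ---------------------------------------------
# The backwards-compatibility branch above: older checkpoints store
# pos_att_type as a pipe-separated string, normalized to a lowercase list.
if __name__ == "__main__":
    legacy = "P2C|C2P"
    assert [x.strip() for x in legacy.lower().split("|")] == ["p2c", "c2p"]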
| 341
| 1
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
_A = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_A = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_A = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_A = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_A = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_A = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_A = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_A = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_A = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_A = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_A = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_A = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_A = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_A = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_A = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
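# --- Hedged micro-example (added) ---------------------------------------------
# How a hub model id maps to a lookup key in the assert above: "/" and "-" are
# both flattened into underscores. The id below is a hypothetical example.
model_id = "google/ddpm-cifar10-32"
key = "_".join("_".join(model_id.split("/")).split("-"))
assert key == "google_ddpm_cifar10_32"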
| 359
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw one triangle, then recurse into its three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
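# --- Hedged micro-example (added) ---------------------------------------------
# The recursion fans out threefold per level, so a drawing of depth d contains
# 3**d smallest triangles. Pure arithmetic; no turtle needed.
def _count_leaf_triangles(depth: int) -> int:
    return 1 if depth == 0 else 3 * _count_leaf_triangles(depth - 1)


assert [_count_leaf_triangles(d) for d in range(5)] == [1, 3, 9, 27, 81]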
| 166
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 69
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 69
| 1
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def UpperCamelCase (lowercase_: str , lowercase_: List[str] , lowercase_: Any=False , lowercase_: int="text" ) -> Tuple:
if model_type == "text":
A__ : str = BarkSemanticModel
A__ : Dict = BarkSemanticConfig
A__ : Tuple = BarkSemanticGenerationConfig
elif model_type == "coarse":
A__ : int = BarkCoarseModel
A__ : Union[str, Any] = BarkCoarseConfig
A__ : int = BarkCoarseGenerationConfig
elif model_type == "fine":
A__ : Union[str, Any] = BarkFineModel
A__ : Any = BarkFineConfig
A__ : Union[str, Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
A__ : Union[str, Any] = f"""{model_type}_small""" if use_small else model_type
A__ : Union[str, Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase_ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
A__ : Any = torch.load(lowercase_ , map_location=lowercase_ )
# this is a hack
A__ : Dict = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
A__ : Dict = model_args["""vocab_size"""]
A__ : Optional[Any] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
A__ : Union[str, Any] = model_args.pop("""n_head""" )
A__ : str = model_args.pop("""n_embd""" )
A__ : Dict = model_args.pop("""n_layer""" )
A__ : str = ConfigClass(**checkpoint["""model_args"""] )
A__ : Optional[int] = ModelClass(config=lowercase_ )
A__ : Dict = GenerationConfigClass()
A__ : Tuple = model_generation_config
A__ : int = checkpoint["""model"""]
# fixup checkpoint
A__ : List[Any] = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowercase_ ):
# replace part of the key with corresponding layer name in HF implementation
A__ : Any = k[len(lowercase_ ) :]
for old_layer_name in new_layer_name_dict:
A__ : Any = new_k.replace(lowercase_ , new_layer_name_dict[old_layer_name] )
A__ : Optional[int] = state_dict.pop(lowercase_ )
A__ : Optional[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
A__ : int = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
A__ : int = set(model.state_dict().keys() ) - set(state_dict.keys() )
A__ : Optional[Any] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowercase_ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(lowercase_ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase_ , strict=lowercase_ )
A__ : int = model.num_parameters(exclude_embeddings=lowercase_ )
A__ : Union[str, Any] = checkpoint["""best_val_loss"""].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase_ , 3 )} loss""" )
model.eval()
model.to(lowercase_ )
del checkpoint, state_dict
return model
def UpperCamelCase (lowercase_: Tuple , lowercase_: Any=False , lowercase_: Optional[Any]="text" ) -> Union[str, Any]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
A__ : Union[str, Any] = """cpu""" # do conversion on cpu
A__ : List[str] = _get_ckpt_path(lowercase_ , use_small=lowercase_ )
A__ : str = _load_model(lowercase_ , lowercase_ , model_type=lowercase_ , use_small=lowercase_ )
# load bark initial model
A__ : Union[str, Any] = _bark_load_model(lowercase_ , """cpu""" , model_type=lowercase_ , use_small=lowercase_ )
if model_type == "text":
A__ : str = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowercase_ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
A__ : List[Any] = 5
A__ : List[Any] = 10
if model_type in ["text", "coarse"]:
A__ : int = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
A__ : Union[str, Any] = bark_model(lowercase_ )[0]
A__ : Any = model(lowercase_ )
# take last logits
A__ : List[str] = output_new_model_total.logits[:, [-1], :]
else:
A__ : Optional[int] = 3
A__ : Optional[int] = 8
A__ : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
A__ : List[Any] = model(lowercase_ , lowercase_ )
A__ : Union[str, Any] = bark_model(lowercase_ , lowercase_ )
A__ : List[Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
def UpperCamelCase (lowercase_: List[str] , lowercase_: Any , lowercase_: List[Any] , lowercase_: Optional[int] , lowercase_: int , lowercase_: List[str] , ) -> Any:
A__ : Tuple = os.path.join(lowercase_ , lowercase_ )
A__ : Dict = BarkSemanticConfig.from_pretrained(os.path.join(lowercase_ , """config.json""" ) )
A__ : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowercase_ , """config.json""" ) )
A__ : str = BarkFineConfig.from_pretrained(os.path.join(lowercase_ , """config.json""" ) )
A__ : str = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
A__ : str = BarkSemanticModel.from_pretrained(lowercase_ )
A__ : List[str] = BarkCoarseModel.from_pretrained(lowercase_ )
A__ : Tuple = BarkFineModel.from_pretrained(lowercase_ )
A__ : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
A__ : int = BarkConfig.from_sub_model_configs(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
A__ : Any = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
A__ : Any = BarkModel(lowercase_ )
A__ : str = semantic
A__ : Optional[Any] = coarseAcoustic
A__ : int = fineAcoustic
A__ : Any = codec
A__ : str = bark_generation_config
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
bark.save_pretrained(lowercase_ , repo_id=lowercase_ , push_to_hub=lowercase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 141
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
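# --- Hedged micro-example (added) ---------------------------------------------
# The derived properties above, evaluated on the default configuration.
if __name__ == "__main__":
    config = FalconConfig()  # hidden_size=4544, num_attention_heads=71
    assert config.head_dim == 64  # 4544 // 71
    assert config.rotary is True  # alibi defaults to False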
| 141
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str, _lowerCamelCase : List[Any], _lowerCamelCase : int=13, _lowerCamelCase : str=3, _lowerCamelCase : Optional[Any]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : int=4_00, _lowerCamelCase : str=True, _lowerCamelCase : List[str]=None, _lowerCamelCase : Tuple=True, _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], _lowerCamelCase : Any=[0.5, 0.5, 0.5], ):
'''simple docstring'''
__A = size if size is not None else {'height': 18, 'width': 18}
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size
__A = do_normalize
__A = image_mean
__A = image_std
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
A_ : Any = ViTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = EfficientFormerImageProcessorTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A, '''image_mean''' ) )
self.assertTrue(hasattr(_A, '''image_std''' ) )
self.assertTrue(hasattr(_A, '''do_normalize''' ) )
self.assertTrue(hasattr(_A, '''do_resize''' ) )
self.assertTrue(hasattr(_A, '''size''' ) )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A, Image.Image )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_A, numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A, np.ndarray )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_A, torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A, torch.Tensor )
# Test not batched input
__A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
__A = image_processor(_A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
| 266
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
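# --- Hedged sketch (added) ------------------------------------------------------
# The benchmark imports `get_duration` from a local utils module not shown here.
# A common shape for such a timing decorator (an assumption, not the benchmarked
# repo's actual code) is:
import functools
import time


def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # return elapsed seconds instead of the result

    return wrapper


@_get_duration_sketch
def _busy_wait(n: int) -> None:
    sum(range(n))


if __name__ == "__main__":
    print(f"took {_busy_wait(1_000_000):.4f}s")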
| 327
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    """Configuration class for LUKE models."""

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 52
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 287
| 0
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    linked_list = LinkedList()
    test_input = [
        -9,
        100,
        Node(77_345_112),
        "dlrow olleH",
        7,
        5_555,
        0,
        -192.5_5555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 295
|
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    '''Generate a random graph with the given number of vertices and edge probability.'''
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    '''Generate a complete graph with the given number of vertices.'''
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
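    # Illustrative usage (a sketch, not part of the original module): a small
    # undirected random graph with edge probability 0.5, and the complete graph K_4.
    print(random_graph(4, 0.5))
    print(complete_graph(4))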
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
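

# Illustrative usage (a sketch; downloading the checkpoint requires network access):
#
#     tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # input_ids end with [eos, src_lang_code], matching set_src_lang_special_tokens above.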
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
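

# Illustrative usage (a sketch, not part of the original module):
#
#     config = BlenderbotSmallConfig()
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
#     print(list(onnx_config.inputs.keys()))  # input_ids, attention_mask, decoder_input_ids, ...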
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
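
    # Example invocation (illustrative; the script filename is an assumption):
    #
    #     python get_runner_status.py --target_runners runner-1,runner-2 --token <token-with-actions-read>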
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
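

# Illustrative usage (a sketch; the random waveform stands in for real 16 kHz audio):
#
#     import numpy as np
#     feature_extractor = WhisperFeatureExtractor()
#     waveform = np.random.randn(16000 * 5).astype(np.float32)  # 5 seconds of fake audio
#     features = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
#     print(features["input_features"].shape)  # (1, 80, 3000) after padding to 30 s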
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
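

# Illustrative usage of the pipeline under test (a sketch; requires torch, PIL, and network access):
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png", question="How many cats are there?", top_k=2)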
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    '''Interpolate and evaluate a polynomial at x0 using Neville's method.'''
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
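
    # Worked example (illustrative): the points (1, 6), (2, 7), (3, 8), (4, 9) lie on
    # the line y = x + 5, so interpolating at x0 = 5 returns 10.0.
    value, table = neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 5)
    print(value)  # 10.0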
"""simple docstring"""
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
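
# Illustrative invocations (a sketch; assumes the `accelerate` CLI entry point is installed):
#
#     accelerate config            # interactive questionnaire
#     accelerate config default    # `default` subcommand registered above
#     accelerate config update     # `update` subcommand registered above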
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """
    Construct a MGP-STR character-level tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
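

# Illustrative usage (a sketch; tokenization is one vocab entry per character):
#
#     tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     ids = tokenizer("ticket")["input_ids"]  # one id per character, via the vocab above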
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
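

# Illustrative usage (a sketch; needs an interactive terminal):
#
#     menu = BulletMenu("Choose a mixed precision mode:", ["no", "fp16", "bf16"])
#     choice_index = menu.run(default_choice=0)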
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
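

# Illustrative usage (a sketch, not part of the original module):
#
#     config = BartConfig()
#     onnx_config = BartOnnxConfig(config, task="default")
#     print(list(onnx_config.inputs.keys()))  # input_ids, attention_mask, decoder_input_ids, ...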
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
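

# Illustrative usage (a sketch, assuming the public `google/ddpm-cifar10-32` checkpoint):
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("ddim_sample.png")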
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
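
# Illustrative invocation (a sketch; the script filename is an assumption):
#
#     python inference_bf16.py --dpm --steps 20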
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
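

# Illustrative wiring (a sketch; `post_processing_function` and `compute_metrics` are assumed helpers):
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=64, num_beams=4)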
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy', output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
        model = model.to(torch_device)

        if hasattr(model, 'set_default_attn_processor'):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowerCAmelCase )
__lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase )
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 7_6_8]
__lowercase= sample[0, :8].flatten().cpu()
print(lowerCAmelCase )
__lowercase= torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
| 295
| 1
|
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    '''Sum 2 * a * ((a - 1) // 2) over a = 3..n.'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
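# Worked check (computed by hand, illustrative only): for n = 10 the sum is
# 2*3*1 + 2*4*1 + 2*5*2 + 2*6*2 + 2*7*3 + 2*8*3 + 2*9*4 + 2*10*4 = 300,
# so solution(10) == 300.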
| 365
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z)
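# Minimal sanity check (illustrative): the array is sorted in place, while the
# returned comparison count varies between runs because pivots are chosen at random.
sample = [3, 1, 2]
_in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == [1, 2, 3]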
| 106
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = StableDiffusionInpaintPipeline
A = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
def __snake_case (self ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase_: Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Optional[int] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
torch.manual_seed(0 )
UpperCAmelCase_: Tuple = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
UpperCAmelCase_: str = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="""gelu""", projection_dim=512, )
UpperCAmelCase_: Optional[int] = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase_: Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> List[Any]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCAmelCase_: Optional[Any] = floats_tensor((1, 3, 32, 32), rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
UpperCAmelCase_: str = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" ).resize((64, 64) )
UpperCAmelCase_: Dict = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
UpperCAmelCase_: Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __snake_case (self ) -> int:
UpperCAmelCase_: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: List[Any] = self.get_dummy_components()
UpperCAmelCase_: List[Any] = StableDiffusionInpaintPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCAmelCase_: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_: Optional[int] = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case (self ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def __snake_case (self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
UpperCAmelCase_: Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
UpperCAmelCase_: int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
UpperCAmelCase_: str = """stabilityai/stable-diffusion-2-inpainting"""
UpperCAmelCase_: List[str] = StableDiffusionInpaintPipeline.from_pretrained(SCREAMING_SNAKE_CASE_, safety_checker=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Tuple = """Face of a yellow cat, high resolution, sitting on a park bench"""
UpperCAmelCase_: Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, output_type="""np""", )
UpperCAmelCase_: int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
UpperCAmelCase_: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
UpperCAmelCase_: Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
UpperCAmelCase_: Union[str, Any] = """stabilityai/stable-diffusion-2-inpainting"""
UpperCAmelCase_: str = StableDiffusionInpaintPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_, torch_dtype=torch.floataa, safety_checker=SCREAMING_SNAKE_CASE_, )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Dict = """Face of a yellow cat, high resolution, sitting on a park bench"""
UpperCAmelCase_: Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_: Any = pipe(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, output_type="""np""", )
UpperCAmelCase_: List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __snake_case (self ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
UpperCAmelCase_: List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
UpperCAmelCase_: Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
UpperCAmelCase_: str = PNDMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_, subfolder="""scheduler""" )
UpperCAmelCase_: Dict = StableDiffusionInpaintPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_, safety_checker=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_, torch_dtype=torch.floataa, )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_: List[str] = """Face of a yellow cat, high resolution, sitting on a park bench"""
UpperCAmelCase_: Any = torch.manual_seed(0 )
UpperCAmelCase_: Union[str, Any] = pipe(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type="""np""", )
UpperCAmelCase_: int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 147
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Optional[Any] = '▁'
a : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
a : Optional[Any] = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
a : Any = {
'facebook/xglm-564M': 2_048,
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['''input_ids''', '''attention_mask''']
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCAmelCase_: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase_: Optional[int] = 7
UpperCAmelCase_: Dict = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
UpperCAmelCase_: List[Any] = kwargs.get("""additional_special_tokens""", [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_: Dict = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_: Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCAmelCase_: Union[str, Any] = len(self.sp_model )
UpperCAmelCase_: Optional[int] = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self ) -> Any:
UpperCAmelCase_: List[Any] = self.__dict__.copy()
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCAmelCase_: List[Any] = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
UpperCAmelCase_: int = {}
UpperCAmelCase_: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase_: List[str] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __snake_case (self ) -> Tuple:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_: str = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: int = """""".join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, """ """ ).strip()
return out_string
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase_: List[Any] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, """wb""" ) as fi:
UpperCAmelCase_: Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
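# Minimal usage sketch (hedged: this class appears to be transformers' XGLM tokenizer,
# and the checkpoint referenced in the vocab map above must be reachable):
# from transformers import XGLMTokenizer
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tok("Hello world").input_ids  # starts with the </s> id, per build_inputs_with_special_tokens above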
| 147
| 1
|
"""simple docstring"""
def check_bouncy(n):
    if not isinstance(n, int):
        raise ValueError('''check_bouncy() accepts only integer arguments''')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent=99):
    if not 0 < percent < 100:
        raise ValueError('''solution() only accepts values from 0 to 100''')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(99)}')
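# Hand-checkable illustrations of check_bouncy:
# 101 -> sorted digits give "011"; neither "011" nor its reverse "110" equals "101", so 101 is bouncy.
# 1349 -> the digits already ascend, so sorted("1349") == "1349" and it is not bouncy.
assert check_bouncy(101)
assert not check_bouncy(1349)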
| 357
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    if not isinstance(number_of_qubits, int):
        raise TypeError('''number of qubits must be an integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')
    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f'''Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}'''
    )
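# Hedged note on the expected output: starting from the all-zeros basis state, the
# circuit above prepares a uniform superposition, so the 10_000 shots should be
# spread roughly evenly across all 2**3 = 8 measured bitstrings.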
| 155
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def a_ ( __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =s.rsplit(_A , _A )
return new.join(_A )
def a_ ( __snake_case : List[Any] ) -> Dict:
"""simple docstring"""
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def a_ ( __snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ =['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase_ =key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase_ =key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
lowerCamelCase_ =rreplace(_A , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
lowerCamelCase_ =rreplace(_A , '''.b''' , '''.bias''' , 1 )
lowerCamelCase_ =value.float()
return upgrade
@torch.no_grad()
def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Dict=True ) -> Optional[int]:
"""simple docstring"""
from dall_e import Encoder
lowerCamelCase_ =Encoder()
if os.path.exists(_A ):
lowerCamelCase_ =torch.load(_A )
else:
lowerCamelCase_ =torch.hub.load_state_dict_from_url(_A )
if isinstance(_A , _A ):
lowerCamelCase_ =ckpt.state_dict()
encoder.load_state_dict(_A )
if config_path is not None:
lowerCamelCase_ =FlavaImageCodebookConfig.from_pretrained(_A )
else:
lowerCamelCase_ =FlavaImageCodebookConfig()
lowerCamelCase_ =FlavaImageCodebook(_A ).eval()
lowerCamelCase_ =encoder.state_dict()
lowerCamelCase_ =upgrade_state_dict(_A )
hf_model.load_state_dict(_A )
lowerCamelCase_ =hf_model.state_dict()
lowerCamelCase_ =count_parameters(_A )
lowerCamelCase_ =count_parameters(_A )
assert torch.allclose(_A , _A , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_A )
else:
return hf_state_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
a_ : List[Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
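# Hypothetical invocation sketch (the script file name and paths are assumed; the
# flags are the ones defined by the argparse block above):
# python convert_flava_codebook.py \
#     --checkpoint_path ./encoder.pkl \
#     --pytorch_dump_folder_path ./flava-codebook \
#     --config_path ./config.json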
| 75
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : Any = logging.get_logger(__name__)
__A : Dict = {'vocab_file': 'spiece.model'}
__A : List[Any] = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<sep>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> None:
lowerCamelCase_ =AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =3
lowerCamelCase_ =do_lower_case
lowerCamelCase_ =remove_space
lowerCamelCase_ =keep_accents
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCamelCase_ =jieba
lowerCamelCase_ =str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _snake_case ( self )-> Any:
return len(self.sp_model )
def _snake_case ( self )-> Dict:
lowerCamelCase_ ={self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self )-> List[Any]:
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE )-> List[Any]:
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> int:
if self.remove_space:
lowerCamelCase_ =""" """.join(inputs.strip().split() )
else:
lowerCamelCase_ =inputs
lowerCamelCase_ =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCamelCase_ =unicodedata.normalize("""NFKD""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ ="""""".join([c for c in outputs if not unicodedata.combining(_SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
lowerCamelCase_ =outputs.lower()
return outputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ =self.preprocess_text(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =[]
for piece in pieces:
if len(_SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase_ =self.sp_model.EncodeAsPieces(piece[:-1].replace(_SCREAMING_SNAKE_CASE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ =cur_pieces[1:]
else:
lowerCamelCase_ =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_SCREAMING_SNAKE_CASE )
else:
new_pieces.append(_SCREAMING_SNAKE_CASE )
return new_pieces
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
lowerCamelCase_ ="""""".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ =os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Any:
lowerCamelCase_ =super()._decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
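# Descriptive note (added commentary): __init__ builds a str.maketrans table mapping
# " " -> "\u2582" and "\n" -> "\u2583"; the overridden _decode above reverses exactly
# that mapping, so spaces and newlines survive the jieba-based round trip even though
# the encode-side use of the table is not shown in this excerpt.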
| 154
| 0
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_SCREAMING_SNAKE_CASE : Tuple = get_logger(__name__)
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_=0 ):
"""simple docstring"""
os.makedirs(UpperCamelCase_ ,exist_ok=UpperCamelCase_ )
with FSDP.state_dict_type(
UpperCamelCase_ ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
snake_case = os.path.join(UpperCamelCase_ ,UpperCamelCase_ )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
snake_case = os.path.join(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case = os.path.join(UpperCamelCase_ ,F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(UpperCamelCase_ ,exist_ok=UpperCamelCase_ )
logger.info(F'''Saving model to {ckpt_dir}''' )
snake_case = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=UpperCamelCase_ ,storage_writer=dist_cp.FileSystemWriter(UpperCamelCase_ ) ,planner=DefaultSavePlanner() ,)
logger.info(F'''Model saved to {ckpt_dir}''' )
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCamelCase_ ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(UpperCamelCase_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
snake_case = os.path.join(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Loading model from {input_model_file}''' )
snake_case = torch.load(UpperCamelCase_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
snake_case = os.path.join(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Loading model from {input_model_file}''' )
snake_case = torch.load(UpperCamelCase_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case = (
os.path.join(UpperCamelCase_ ,F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
snake_case = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=UpperCamelCase_ ,storage_reader=dist_cp.FileSystemReader(UpperCamelCase_ ) ,planner=DefaultLoadPlanner() ,)
snake_case = state_dict['''model''']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(UpperCamelCase_ )
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_=0 ):
"""simple docstring"""
os.makedirs(UpperCamelCase_ ,exist_ok=UpperCamelCase_ )
with FSDP.state_dict_type(
UpperCamelCase_ ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
snake_case = FSDP.optim_state_dict(UpperCamelCase_ ,UpperCamelCase_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
snake_case = os.path.join(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
snake_case = os.path.join(UpperCamelCase_ ,F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(UpperCamelCase_ ,exist_ok=UpperCamelCase_ )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} ,storage_writer=dist_cp.FileSystemWriter(UpperCamelCase_ ) ,planner=DefaultSavePlanner() ,)
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCamelCase_ ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
snake_case = os.path.join(UpperCamelCase_ ,UpperCamelCase_ )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
snake_case = torch.load(UpperCamelCase_ )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
snake_case = (
os.path.join(UpperCamelCase_ ,F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() ,optimizer_key='''optimizer''' ,storage_reader=dist_cp.FileSystemReader(UpperCamelCase_ ) ,)
snake_case = optim_state['''optimizer''']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
snake_case = FSDP.optim_state_dict_to_load(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
optimizer.load_state_dict(UpperCamelCase_ )
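# Summary of the three StateDictType branches used throughout this module
# (descriptive comment, not original code):
# - FULL_STATE_DICT:    a single "{MODEL_NAME}.bin" / "{OPTIMIZER_NAME}.bin" file,
#                       written and read on rank 0 only.
# - LOCAL_STATE_DICT:   one "{MODEL_NAME}_rank{process_index}.bin" model shard per rank.
# - SHARDED_STATE_DICT: a checkpoint directory handled by torch.distributed.checkpoint
#                       with DefaultSavePlanner / DefaultLoadPlanner.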
| 213
|
import argparse
import copy
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = {}
with open(UpperCamelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[1], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[0], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
with open(UpperCamelCase_ ) as f:
snake_case = f.read(1 )
snake_case = start_node
snake_case = []
snake_case = start_node
snake_case = 0
while visiting not in first_solution:
snake_case = 1_00_00
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(UpperCamelCase_ ) and k[0] not in first_solution:
snake_case = k[1]
snake_case = k[0]
first_solution.append(UpperCamelCase_ )
snake_case = distance_of_first_solution + int(UpperCamelCase_ )
snake_case = best_node
first_solution.append(UpperCamelCase_ )
snake_case = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_00_00
)
return first_solution, distance_of_first_solution
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = []
for n in solution[1:-1]:
snake_case = solution.index(UpperCamelCase_ )
for kn in solution[1:-1]:
snake_case = solution.index(UpperCamelCase_ )
if n == kn:
continue
snake_case = copy.deepcopy(UpperCamelCase_ )
snake_case = kn
snake_case = n
snake_case = 0
for k in _tmp[:-1]:
snake_case = _tmp[_tmp.index(UpperCamelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case = distance + int(i[1] )
_tmp.append(UpperCamelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
return neighborhood_of_solution
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = 1
snake_case = first_solution
snake_case = []
snake_case = distance_of_first_solution
snake_case = solution
while count <= iters:
snake_case = find_neighborhood(UpperCamelCase_ ,UpperCamelCase_ )
snake_case = 0
snake_case = neighborhood[index_of_best_solution]
snake_case = len(UpperCamelCase_ ) - 1
snake_case = False
while not found:
snake_case = 0
while i < len(UpperCamelCase_ ):
if best_solution[i] != solution[i]:
snake_case = best_solution[i]
snake_case = solution[i]
break
snake_case = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case = True
snake_case = best_solution[:-1]
snake_case = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case = cost
snake_case = solution
else:
snake_case = index_of_best_solution + 1
snake_case = neighborhood[index_of_best_solution]
if len(UpperCamelCase_ ) >= size:
tabu_list.pop(0 )
snake_case = count + 1
return best_solution_ever, best_cost
def UpperCAmelCase__ (UpperCamelCase_=None ):
"""simple docstring"""
snake_case = generate_neighbours(args.File )
snake_case , snake_case = generate_first_solution(
args.File ,UpperCamelCase_ )
snake_case , snake_case = tabu_search(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,args.Iterations ,args.Size ,)
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
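# Hypothetical invocation sketch (script and data file names are assumed; the flags
# are the ones registered above):
# python tabu_search.py -f neighbours.txt -i 100 -s 5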
| 213
| 1
|
def _print_dist(dist, v):
    print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("""inf"""):
                print(int(dist[i][j]), end="""\t""")
            else:
                print("""INF""", end="""\t""")
        print()


def floyd_warshall(graph, v):
    dist = [[float("""inf""") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("""inf""")
                    and dist[k][j] != float("""inf""")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
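# Non-interactive sketch of the same run (illustrative; mirrors the example above):
# INF = float("inf")
# graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
# floyd_warshall(graph, 3)  # prints the 0/INF matrix shown in the expected output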
| 170
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_lowercase : Any =logging.get_logger(__name__)
@add_end_docstrings(A__ )
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , *__lowercase , **__lowercase ) -> int:
"""simple docstring"""
super().__init__(*__lowercase , **__lowercase )
requires_backends(self , """vision""" )
self.check_model_type(__lowercase )
def __call__( self , __lowercase , **__lowercase ) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(__lowercase , **__lowercase )
def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Optional[Any]:
"""simple docstring"""
return {}, {}, {}
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = load_image(__lowercase )
a__ : Optional[int] = image.size
a__ : int = self.image_processor(images=__lowercase , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : str = self.model(**__lowercase )
return model_outputs
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
a__ : Optional[int] = model_outputs.predicted_depth
a__ : Any = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=__lowercase )
a__ : Union[str, Any] = prediction.squeeze().cpu().numpy()
a__ : List[str] = (output * 2_5_5 / np.max(__lowercase )).astype("""uint8""" )
a__ : Any = Image.fromarray(__lowercase )
a__ : List[str] = {}
a__ : Tuple = predicted_depth
a__ : Any = depth
return output_dict
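# Minimal usage sketch (hedged: model id assumed; requires torch and vision extras):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["depth"]            # PIL.Image, depth rescaled to uint8 as in postprocess above
# out["predicted_depth"]  # the raw predicted-depth tensor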
| 170
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase_ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowercase_ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowercase_ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    """simple docstring"""

    def _info(self) -> MetricInfo:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 282
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
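# Worked sizing example (a sketch added for illustration; not one of the original
# tests): with shortest_edge=288 and size_divisor=32, a 400x600 (HxW) input gets
# scale = 288 / 400 = 0.72, so (newh, neww) = (288, 432). max_size is
# int(1333 / 800 * 288) = 479, so no second rescale is applied; flooring to the
# divisor gives 288 // 32 * 32 = 288 and 432 // 32 * 32 = 416, i.e. the processor
# is expected to emit 288x416 pixel tensors for that image.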
| 282
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
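# Worked default sketch (follows directly from the constructor above): with
# embed_dim=96 and depths=[2, 2, 6, 2], the derived channel dimension is
# int(96 * 2 ** (4 - 1)) = 768.
#
#   config = Swinv2Config()  # all defaults
#   assert config.hidden_size == 768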
| 100
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
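# Patch-count sanity check (a sketch, consistent with the integration tests above):
# a 224x224 image with 16x16 patches yields (224 // 16) ** 2 == 196 patches, which
# is why bool_masked_pos has shape (1, 196) and the masked-LM logits cover 196
# positions over the 8192-entry visual vocabulary.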
| 106
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    """simple docstring"""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
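# Note (added for illustration): per retrieved block, the retriever reports whether
# the answer span occurs in it (has_answers) and the token-level start/end positions
# (start_pos / end_pos, -1 when absent), alongside the
# "[CLS] question [SEP] block [SEP]" concatenations that the token assertions above
# verify.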
| 74
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """simple docstring"""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
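# Hedged usage sketch (illustrative only; the URL is a placeholder): these
# filesystems back fsspec's chained-URL syntax mentioned in the protocol comment
# above, e.g.
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz") as f:
#       data = f.read()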
| 74
| 1
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
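    # Usage sketch (added for illustration; these asserts are not in the original
    # module): an automorphic number is one whose square ends in the number itself.
    assert is_automorphic_number(5)  # 5 * 5 = 25
    assert is_automorphic_number(76)  # 76 * 76 = 5776
    assert not is_automorphic_number(7)  # 7 * 7 = 49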
| 258
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    '''simple docstring'''
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1_024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048, "num_labels": 3_129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1_024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')

    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
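# Hypothetical invocation sketch (the script filename and output path are
# placeholders; the checkpoint file must be one of ACCEPTABLE_CHECKPOINTS above):
#
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visual_bert_vqa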
| 155
| 0
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        requires_backends(self, 'torch')

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs['crop_n_points_downscale_factor']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs['stability_score_thresh']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """simple docstring"""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        """simple docstring"""
        image = load_image(image)
        target_size = self.image_processor.size['longest_edge']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors='pt')

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values'))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
                'To return all points at once, set points_per_batch to None')

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """simple docstring"""
        input_boxes = model_inputs.pop('input_boxes')
        is_last = model_inputs.pop('is_last')
        original_sizes = model_inputs.pop('original_sizes').tolist()
        reshaped_input_sizes = model_inputs.pop('reshaped_input_sizes').tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs['iou_scores']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores'))
            all_masks.extend(model_output.pop('masks'))
            all_boxes.append(model_output.pop('boxes'))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
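# Hedged usage sketch (the model id and image path are assumptions, not taken from
# this file): the pipeline is typically reached through the factory, e.g.
#
#   from transformers import pipeline
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
#   outputs = generator("image.png", pred_iou_thresh=0.9)
#   masks = outputs["masks"]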
| 364
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__SCREAMING_SNAKE_CASE = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__SCREAMING_SNAKE_CASE = field(
default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __SCREAMING_SNAKE_CASE = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
__SCREAMING_SNAKE_CASE = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__SCREAMING_SNAKE_CASE = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__SCREAMING_SNAKE_CASE = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__SCREAMING_SNAKE_CASE = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Sequence lengths used for training."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Training seed."""} )
__SCREAMING_SNAKE_CASE = field(
default=1024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class EvaluationArguments:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class HumanEvalArguments:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Sample from the language model's output distribution."""} )
__SCREAMING_SNAKE_CASE = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__SCREAMING_SNAKE_CASE = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__SCREAMING_SNAKE_CASE = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__SCREAMING_SNAKE_CASE = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class PreprocessingArguments:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__SCREAMING_SNAKE_CASE = field(
        default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(
default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class TokenizerTrainingArguments:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(
default=32768 , metadata={"""help""": """Number of examples to train the tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class PretokenizationArguments:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__SCREAMING_SNAKE_CASE = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class InitializationArguments:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
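# Hedged usage sketch (HfArgumentParser is the usual consumer of these dataclasses
# in the codeparrot scripts; the driver script itself is not part of this file, and
# TrainingArguments here is this module's class, not transformers.TrainingArguments):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   train_args = parser.parse_args_into_dataclasses()[0]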
| 335
| 0
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    '''simple docstring'''

    infos: dict


class ServeTokenizeResult(BaseModel):
    '''simple docstring'''

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    '''simple docstring'''

    model: str
    text: str


class ServeForwardResult(BaseModel):
    '''simple docstring'''

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device',
            type=int,
            default=-1,
            help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)',
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        '''simple docstring'''
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.')
        else:
            logger.info(f'''Serving model over {host}:{port}''')
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/', self.model_info, response_model=ServeModelInfoResult,
                        response_class=JSONResponse, methods=['GET'],
                    ),
                    APIRoute(
                        '/tokenize', self.tokenize, response_model=ServeTokenizeResult,
                        response_class=JSONResponse, methods=['POST'],
                    ),
                    APIRoute(
                        '/detokenize', self.detokenize, response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse, methods=['POST'],
                    ),
                    APIRoute(
                        '/forward', self.forward, response_model=ServeForwardResult,
                        response_class=JSONResponse, methods=['POST'],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        '''simple docstring'''
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        '''simple docstring'''
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        '''simple docstring'''
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        '''simple docstring'''
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        '''simple docstring'''
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
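# Hedged CLI sketch (flag values are illustrative): once registered, the command
# above is reachable as, e.g.,
#
#   transformers-cli serve --task text-classification --host localhost --port 8888
#
# which exposes the /, /tokenize, /detokenize and /forward routes registered in
# the FastAPI app.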
| 213
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase_ : Any = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[Any] = ViTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = ViTForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ : Any = 1
lowercase_ : List[Any] = ViTForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : str = model(__UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Dict = self.type_sequence_label_size
lowercase_ : Dict = ViTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : int = model(__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Dict = 1
lowercase_ : Any = ViTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[Any] = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = False
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = ViTModelTester(self )
lowercase_ : Dict = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(__UpperCamelCase )
lowercase_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = ViTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowercase__( ):
lowercase_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : str = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(__UpperCamelCase )
lowercase_ : Any = self.default_image_processor
lowercase_ : Dict = prepare_img()
lowercase_ : Tuple = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase_ : Any = model(**__UpperCamelCase )
# verify the logits
lowercase_ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__UpperCamelCase )
lowercase_ : Optional[int] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : str = ViTModel.from_pretrained('facebook/dino-vits8' ).to(__UpperCamelCase )
lowercase_ : int = ViTImageProcessor.from_pretrained('facebook/dino-vits8' ,size=480 )
lowercase_ : int = prepare_img()
lowercase_ : Dict = image_processor(images=__UpperCamelCase ,return_tensors='pt' )
lowercase_ : int = inputs.pixel_values.to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase_ : int = model(__UpperCamelCase ,interpolate_pos_encoding=__UpperCamelCase )
# verify the logits
lowercase_ : Any = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape ,__UpperCamelCase )
lowercase_ : Any = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Optional[int] = ViTModel.from_pretrained('facebook/dino-vits8' ,torch_dtype=torch.floataa ,device_map='auto' )
lowercase_ : int = self.default_image_processor
lowercase_ : Optional[int] = prepare_img()
lowercase_ : Tuple = image_processor(images=__UpperCamelCase ,return_tensors='pt' )
lowercase_ : Union[str, Any] = inputs.pixel_values.to(__UpperCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )
| 213
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : Optional[int] = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365
|
import math
import sys
def snake_case__ ( number: int ):
    '''simple docstring'''
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    # answers[i] caches the minimum number of perfect squares summing to i
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            # take j**2 as the last square and recurse on the remainder
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer , answer )
        answers[i] = answer
    return answers[number]
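# Example: 25 needs a single square (5**2) while 26 needs two (5**2 + 1**2);
# by Lagrange's four-square theorem the result never exceeds 4.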
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216
| 0
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 282
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = range_bbox
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case = bbox[i, j, 3]
_snake_case = bbox[i, j, 1]
_snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case = bbox[i, j, 2]
_snake_case = bbox[i, j, 0]
_snake_case = t
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : List[str] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
'''simple docstring'''
_snake_case = LiltModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = LiltForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
'''simple docstring'''
_snake_case = LiltForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[str] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Union[str, Any] = False
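    # Common pipeline tests are skipped for LiLT via the hook below, which always returns True.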
def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
'''simple docstring'''
return True
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = LiltModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = LiltModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def A ( self : Tuple ):
        '''simple docstring'''
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 282
| 1
|
"""simple docstring"""
def euclidean_distance_sqr( point1 ,point2 ) -> float:
    """simple docstring"""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array ,column=0 ) -> list:
    """simple docstring"""
    return sorted(array ,key=lambda point: point[column] )
def dis_between_closest_pair( points ,points_counts ,min_dis=float("inf" ) ) -> float:
    """simple docstring"""
    # brute force over all pairs; used as the base case of the recursion
    for i in range(points_counts - 1 ):
        for j in range(i + 1 ,points_counts ):
            current_dis = euclidean_distance_sqr(points[i] ,points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points ,points_counts ,min_dis=float("inf" ) ) -> float:
    """simple docstring"""
    # inside the strip every point only needs to be checked against 6 neighbours
    for i in range(min(6 ,points_counts - 1 ) ,points_counts ):
        for j in range(max(0 ,i - 6 ) ,i ):
            current_dis = euclidean_distance_sqr(points[i] ,points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x ,points_sorted_on_y ,points_counts ) -> float:
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x ,points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x ,points_sorted_on_y[:mid] ,mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x ,points_sorted_on_y[mid:] ,points_counts - mid )
    closest_pair_dis = min(closest_in_left ,closest_in_right )
    # keep only points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip ,len(cross_strip ) ,closest_pair_dis )
    return min(closest_pair_dis ,closest_in_strip )
def closest_pair_of_points( points ,points_counts ) -> float:
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points ,column=0 )
    points_sorted_on_y = column_based_sort(points ,column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x ,points_sorted_on_y ,points_counts )
    ) ** 0.5
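# Each strip point is compared against at most 6 neighbours, so the recurrence is
# T(n) = 2T(n/2) + O(n), giving O(n log n) once the two pre-sorts are done.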
if __name__ == "__main__":
lowerCamelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 366
|
"""simple docstring"""
lowerCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
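# test_graph is a capacity matrix: test_graph[u][v] is the capacity of the
# directed edge u -> v (0 means no edge).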
def bfs( graph ,s ,t ,parent ) -> bool:
    """simple docstring"""
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph ,source ,sink ) -> list:
    """simple docstring"""
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph ,source ,sink ,parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow ,graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        # update residual capacities along the augmenting path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # an edge belongs to the cut if it had capacity originally but is now saturated
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 310
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: int = StableDiffusionInstructPixaPixPipeline
_lowerCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
_lowerCamelCase: str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase: Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase: str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=8 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
A = PNDMScheduler(skip_prk_steps=A_ )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
A = CLIPTextModel(A_ )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
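    # Deterministic dummy inputs: a tiny random image plus a fixed-seed generator so the fast tests are reproducible.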
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Tuple=0 ) -> Union[str, Any]:
A = floats_tensor((1, 3, 32, 32) ,rng=random.Random(A_ ) ).to(A_ )
A = image.cpu().permute(0 ,2 ,3 ,1 )[0]
A = Image.fromarray(np.uinta(A_ ) ).convert('RGB' )
if str(A_ ).startswith('mps' ):
A = torch.manual_seed(A_ )
else:
A = torch.Generator(device=A_ ).manual_seed(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = sd_pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = 'french fries'
A = sd_pipe(**A_ ,negative_prompt=A_ )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = [inputs['prompt']] * 2
A = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
A = torch.from_numpy(A_ ).unsqueeze(0 ).to(A_ )
A = image / 2 + 0.5
A = image.permute(0 ,3 ,1 ,2 )
A = image.repeat(2 ,1 ,1 ,1 )
A = sd_pipe(**A_ ).images
A = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' )
A = StableDiffusionInstructPixaPixPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = sd_pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
        A = [round(x ,4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(x ) for x in A] ) )
assert image.shape == (1, 32, 32, 3)
A = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**A_ )
A = VaeImageProcessor(do_resize=A_ ,do_normalize=A_ )
A = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
A = pipe(**self.get_dummy_inputs_by_type(A_ ,input_image_type='pt' ) )[0]
A = components['vae']
A = self.get_dummy_inputs_by_type(A_ ,input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A = vae.encode(inputs[image_param] ).latent_dist.mode()
A = pipe(**A_ )[0]
A = np.abs(out - out_latents_inputs ).max()
self.assertLess(A_ ,1e-4 ,'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int]=0 ) -> str:
A = torch.manual_seed(A_ )
A = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
A = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=A_ )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=A_ )
A = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = 0
def callback_fn(A_ : int ,A_ : int ,A_ : torch.FloatTensor ) -> None:
A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A = latents[0, -3:, -3:, -1]
A = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A = latents[0, -3:, -3:, -1]
A = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A = False
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=A_ ,torch_dtype=torch.floataa )
A = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = self.get_inputs()
pipe(**A_ ,callback=A_ ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=A_ ,torch_dtype=torch.floataa )
A = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A = self.get_inputs()
A = pipe(**A_ )
A = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A = inputs['image'].resize((504, 504) )
A = 'timbrooks/instruct-pix2pix'
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
A_ ,safety_checker=A_ ,)
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = pipe(**A_ )
A = output.images[0]
A = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
A = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 74
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
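# The scheduler below is a Heun (2nd-order) sampler in the k-diffusion style: each
# step runs an Euler predictor and then a corrector that averages the two
# derivatives; the `state_in_first_order` flag tracks which half comes next.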
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase: Optional[Any] = 2
@register_to_config
def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]:
if trained_betas is not None:
A = torch.tensor(A_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(A_ ,A_ ,A_ )
A = use_karras_sigmas
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(A_ ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
A = self.index_for_timestep(A_ )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
return sample
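    # Build the sigma/timestep schedule (optionally Karras-spaced) for the requested number of inference steps.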
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
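    # Forward diffusion: perturb clean samples with noise scaled by the per-timestep sigma (used e.g. for image-to-image starts).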
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
A = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
A = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ) -> int:
return self.config.num_train_timesteps
| 74
| 1
|
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase=True , __UpperCAmelCase=2 ):
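    # For each (attribute, removal_version, message) tuple: raise if the installed
    # version is already >= removal_version, otherwise collect the deprecated value
    # (popped from the `take_from` kwargs or read off the object) and emit a warning.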
from .. import __version__
_lowercase : Tuple = take_from
_lowercase : Union[str, Any] = ()
if not isinstance(args[0] , __UpperCAmelCase ):
_lowercase : Optional[int] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse(__UpperCAmelCase ):
raise ValueError(
F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
F""" version {__version__} is >= {version_name}""" )
_lowercase : List[str] = None
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__UpperCAmelCase ),)
_lowercase : Union[str, Any] = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__UpperCAmelCase , __UpperCAmelCase ):
values += (getattr(__UpperCAmelCase , __UpperCAmelCase ),)
_lowercase : List[str] = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_lowercase : int = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_lowercase : Dict = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , __UpperCAmelCase , stacklevel=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) > 0:
_lowercase : Tuple = inspect.getouterframes(inspect.currentframe() )[1]
_lowercase : List[Any] = call_frame.filename
_lowercase : Tuple = call_frame.lineno
_lowercase : Optional[Any] = call_frame.function
_lowercase : Union[str, Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__UpperCAmelCase ) == 0:
return
elif len(__UpperCAmelCase ) == 1:
return values[0]
return values
| 359
|
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner :
    """simple docstring"""
    def __init__( self ,k ,window_size ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
    def __str__( self ):
        return str(self.k )
    def detect( self ,img_path ):
        img = cv2.imread(img_path ,0 )
        h , w = img.shape
        corner_list : list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img ,cv2.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        # structure tensor components from the image gradients
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset ,h - offset ):
            for x in range(offset ,w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Harris corner response
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) ,0 )
                    color_img.itemset((y, x, 1) ,0 )
                    color_img.itemset((y, x, 2) ,255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect("""path_to_image""")
    cv2.imwrite("""detect.png""", color_img)
| 336
| 0
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(
features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
a = Generator(
cache_dir=lowerCamelCase_ , features=lowerCamelCase_ , generator=lowerCamelCase_ , gen_kwargs=lowerCamelCase_ , **lowerCamelCase_ , )
def UpperCamelCase_ (self ):
"""simple docstring"""
if self.streaming:
a = self.builder.as_streaming_dataset(split="train" )
# Build regular (map-style) dataset
else:
a = None
a = None
a = None
a = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
a = self.builder.as_dataset(
split="train" , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
| 227
|
"""simple docstring"""
import math
class Graph :
    """simple docstring"""
    def __init__( self ,n=0 ):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 ,n )] for i in range(0 ,n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 ,n )] for i in range(0 ,n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self ,u ,v ,w ):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall( self ):
        """simple docstring"""
        # relax every pair (i, j) through every intermediate node k:
        # O(n**3) time, O(n**2) space; afterwards dp[u][v] is the shortest u -> v distance
        for k in range(0 ,self.n ):
            for i in range(0 ,self.n ):
                for j in range(0 ,self.n ):
                    self.dp[i][j] = min(self.dp[i][j] ,self.dp[i][k] + self.dp[k][j] )
    def show_min( self ,u ,v ):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 335
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = '''camembert'''
def __init__( self : List[str] , _A : int=30_522 , _A : Optional[Any]=768 , _A : Dict=12 , _A : List[str]=12 , _A : Any=3_072 , _A : Union[str, Any]="gelu" , _A : int=0.1 , _A : Tuple=0.1 , _A : Dict=512 , _A : Tuple=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=1E-12 , _A : List[Any]=1 , _A : Tuple=0 , _A : Optional[Any]=2 , _A : List[str]="absolute" , _A : Tuple=True , _A : Union[str, Any]=None , **_A : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
lowercase : Optional[int] = vocab_size
lowercase : Dict = hidden_size
lowercase : int = num_hidden_layers
lowercase : Tuple = num_attention_heads
lowercase : Tuple = hidden_act
lowercase : List[str] = intermediate_size
lowercase : Optional[int] = hidden_dropout_prob
lowercase : List[Any] = attention_probs_dropout_prob
lowercase : Optional[Any] = max_position_embeddings
lowercase : int = type_vocab_size
lowercase : Tuple = initializer_range
lowercase : List[Any] = layer_norm_eps
lowercase : Dict = position_embedding_type
lowercase : Optional[Any] = use_cache
lowercase : int = classifier_dropout
class _A ( _lowerCamelCase ):
@property
def __a ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 356
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''input_values''', '''attention_mask''']
def __init__( self : Optional[Any] , _A : int = 1 , _A : int = 16_000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7_600 , _A : float = 1E-10 , _A : int = 2 , _A : bool = True , **_A : int , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
lowercase : str = do_normalize
lowercase : int = return_attention_mask
lowercase : Union[str, Any] = num_mel_bins
lowercase : Union[str, Any] = hop_length
lowercase : Dict = win_length
lowercase : Union[str, Any] = win_function
lowercase : int = frame_signal_scale
lowercase : Dict = fmin
lowercase : Optional[Any] = fmax
lowercase : str = mel_floor
lowercase : Dict = reduction_factor
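        # Derive STFT parameters in samples and precompute the analysis window and
        # slaney-style mel filter bank once at construction time.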
lowercase : List[Any] = win_length * sampling_rate // 1_000
lowercase : Union[str, Any] = hop_length * sampling_rate // 1_000
lowercase : Optional[Any] = optimal_fft_length(self.sample_size )
lowercase : Dict = (self.n_fft // 2) + 1
lowercase : Any = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A )
lowercase : Dict = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(self, audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs) -> BatchFeature:
        """Featurize `audio` (waveform inputs) and/or `audio_target` (spectrogram labels) into a `BatchFeature`."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
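# Usage sketch (illustrative, not part of the original module): extracting model
# inputs from a raw 16 kHz waveform. Assumes the class above is exported as
# `transformers.SpeechT5FeatureExtractor`.
#
#     import numpy as np
#     from transformers import SpeechT5FeatureExtractor
#     extractor = SpeechT5FeatureExtractor()
#     waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
#     inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
#     print(inputs["input_values"].shape)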
| 116
| 0
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER for `result` and write them (and optionally all outputs) to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
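# Example invocation (illustrative; the model and dataset identifiers are placeholders):
#
#     python eval.py --model_id hf-test/xls-r-dummy \
#         --dataset common_voice --config ab --split test --log_outputs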
| 98
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Return a 0-12 score of how closely the letter frequencies of `message` match typical English text."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
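# Quick demonstration (illustrative, not part of the original module): a plausible
# English sentence should score higher than keyboard noise.
#
#     print(english_freq_match_score("The quick brown fox jumps over the lazy dog"))
#     print(english_freq_match_score("Zqxj kvbw pgfy"))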
| 216
| 0
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Evaluate H(z) = H0 * E(z) for a Lambda-CDM universe."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
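    # With matter_density = 0.3, dark_energy = 0.7 and negligible radiation, the
    # densities sum to 1 (flat universe), so E(z=0) = 1 and the call above prints
    # approximately 68.3, the present-day Hubble constant that was passed in.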
| 365
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def lowerCamelCase ( self : Union[str, Any] ):
super().setUp()
snake_case__ : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
snake_case__ : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Tuple = {"""unk_token""": """<unk>"""}
snake_case__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case_ ) )
def lowerCamelCase ( self : Optional[Any] , **snake_case_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase ( self : List[str] , **snake_case_ : List[str] ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase ( self : List[Any] , snake_case_ : List[str] ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase ( self : int ):
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def lowerCamelCase ( self : Tuple ):
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCamelCase ( self : Any ):
snake_case__ : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
snake_case__ : Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : List[Any] = tokenizer(snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
snake_case__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
# Test that special tokens are reset
@require_torch
def lowerCamelCase ( self : Dict ):
snake_case__ : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : List[Any] = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , snake_case_ )
self.assertIn("""attention_mask""" , snake_case_ )
self.assertNotIn("""labels""" , snake_case_ )
self.assertNotIn("""decoder_attention_mask""" , snake_case_ )
@require_torch
def lowerCamelCase ( self : Tuple ):
snake_case__ : Any = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : str = tokenizer(text_target=snake_case_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase ( self : List[str] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Optional[Any] = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def lowerCamelCase ( self : str ):
snake_case__ : Optional[Any] = ["""A long paragraph for summarization."""]
snake_case__ : Optional[Any] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : int = tokenizer(snake_case_ , text_target=snake_case_ , return_tensors="""pt""" )
snake_case__ : int = inputs["""input_ids"""]
snake_case__ : str = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase ( self : int ):
pass
def lowerCamelCase ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : int = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
snake_case__ : Optional[Any] = """A, <mask> AllenNLP sentence."""
snake_case__ : List[str] = tokenizer_r.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
snake_case__ : int = tokenizer_p.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 43
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
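# For the scores above the expected output is "Optimal value : 65": the leaves
# collapse to [90, 33, 65, 34423] at the maximizing level, then to [33, 65] at
# the minimizing level, and the root maximizer picks 65.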
| 60
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so that `Image` is defined when the vision extras are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
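# Usage sketch (illustrative, not part of the original test file): running the
# object-detection pipeline outside of a test. Assumes `transformers` with torch
# and timm installed.
#
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     for d in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
#         print(d["label"], round(d["score"], 3), d["box"])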
| 310
| 0
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all partitions of `number_to_partition` into primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5_000) -> int | None:
    """Return the smallest integer expressible as a sum of primes in over `number_unique_partitions` ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"{solution() = }")
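# For reference, solution() answers Project Euler problem 77; with the default
# threshold of 5_000 unique prime partitions it should return 71.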
| 305
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into unit-lower and upper triangular parts."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
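# Worked example (illustrative, not part of the original module):
#
#     table = np.array([[4.0, 3.0], [6.0, 3.0]])
#     lower, upper = lower_upper_decomposition(table)
#     # lower == [[1.0, 0.0], [1.5, 1.0]], upper == [[4.0, 3.0], [0.0, -1.5]]
#     assert np.allclose(lower @ upper, table)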
| 305
| 1
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 17
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
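# Quick demonstration (illustrative, not part of the original module): 0 marks a
# free cell and 1 a blocked one; the solver prints the visited path on success.
#
#     maze = [
#         [0, 1, 0],
#         [0, 0, 0],
#         [1, 1, 0],
#     ]
#     solve_maze(maze)  # prints the 0/1 solution matrix row by row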
| 336
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
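# Note (illustrative): with the lazy-module pattern above, heavy submodules are
# only imported on first attribute access, e.g.:
#
#     from transformers import ViTMAEConfig          # cheap, config only
#     from transformers import ViTMAEForPreTraining  # triggers the torch import path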
| 361
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 225
| 0
|
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """Build the classic 9-node test graph and check that Prim's MST contains the expected edges."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 84
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
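# Usage sketch (illustrative, not part of the original module): X-MOD adds
# per-language adapters on top of the RoBERTa layout, driven by `languages`
# and `default_language`:
#
#     config = XmodConfig(languages=["en_XX", "fr_XX"], default_language="en_XX")
#     print(config.adapter_reduction_factor)  # 2 by default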
| 116
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
a__ : Any =BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ )
a__ : int =tokenizer
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[Any] ="<pad>"
a__ : List[str] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCAmelCase__ ) , 1_0_1_1_2_2 )
def _lowercase ( self ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
@require_torch
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Dict =["A long paragraph for summarization.", "Another paragraph for summarization."]
a__ : Dict =[0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
a__ : Union[str, Any] =self.tokenizer(
lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
a__ : Optional[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : str =self.get_tokenizer()
a__ : Any =self.get_rust_tokenizer()
a__ : List[Any] ="I was born in 92000, and this is falsé."
a__ : Optional[int] =tokenizer.tokenize(lowerCAmelCase__ )
a__ : Union[str, Any] =rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[int] =tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a__ : List[str] =rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Union[str, Any] =self.get_rust_tokenizer()
a__ : str =tokenizer.encode(lowerCAmelCase__ )
a__ : int =rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Any ={"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
a__ : Any =[
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowerCAmelCase__ , )
| 148
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : str =pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
a__ : Any =text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
a__ : Any =text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] )
a__ : Tuple =text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
a__ : List[Any] =text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
# Legacy behavior
a__ : Any =text_classifier("This is great !" , return_all_scores=lowerCAmelCase__ )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
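# A minimal standalone sketch (not part of the test class) of the pipeline behavior the
# tests above exercise; the checkpoint is the same tiny test model used in the tests:
#
#   from transformers import pipeline
#
#   classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#   classifier("This is great !")               # best label only: [{"label": ..., "score": ...}]
#   classifier("This is great !", top_k=None)   # one dict per label, replacing legacy `return_all_scores`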
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
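# Example invocation; the script file name is assumed, and the paths are placeholders:
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions \
#       --gold_data_path gold.titles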
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each gene in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children from the pair."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is fully reproduced."""
    # Verify that N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
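# Quick sanity-check sketch: with a short target whose characters are all in the gene
# pool, the search converges after some generations (the exact count varies with the
# random seed):
#
#   generation, total_population, best = basic("hello", list("helo"), debug=False)
#   assert best == "hello"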
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as a tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
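# Usage sketch (downloads the vocab and merges files on first use):
#
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   ids = tokenizer("Hello world")["input_ids"]
#   tokens = tokenizer.convert_ids_to_tokens(ids)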
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
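# Example: the QFT of the all-zero input state is a uniform superposition, so a
# 2-qubit run should spread the 10_000 shots roughly evenly over the four basis states:
#
#   counts = quantum_fourier_transform(2)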
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
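# These tests are collected by the transformers test suite; to run only this module
# (the path assumes a transformers checkout):
#
#   python -m pytest tests/models/flaubert/test_modeling_flaubert.py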
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: linear regression on date and match count."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX with the match count as exogenous variable."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regressor with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Optional outlier detector based on the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: a forecast counts as safe when it does not overshoot the
    actual value and stays within 0.1 of it."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data; pass the scalar value, not the one-element list
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
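# Toy illustration of the voting rule: two of the three forecasts land within 0.1 of
# the actual value, outvoting the single overshooting outlier:
#
#   assert data_safety_checker([0.5, 0.45, 2.0], 0.5) is True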
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
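# Typical launches (the file name is assumed; see the accelerate examples README):
#
#   python nlp_example.py                          # single CPU or GPU
#   accelerate launch nlp_example.py               # distributed, after `accelerate config`
#   python nlp_example.py --mixed_precision fp16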
'''simple docstring'''
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
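# For the array above (matrices A1: 30x35 through A6: 20x25) the program prints 15125
# scalar multiplications and the parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ),
# the classic optimum for this instance.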
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
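# With the lazy module in place, consumers import from the package root as usual:
#
#   from transformers import VisionEncoderDecoderConfig, VisionEncoderDecoderModel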
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ = char
return pairs
# Speech2Text2 has no max input length
lowerCamelCase__ : Any = {'facebook/s2t-wav2vec2-large-en-de': 1_024}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]="<s>" , _lowerCAmelCase : Any="<pad>" , _lowerCAmelCase : List[str]="</s>" , _lowerCAmelCase : int="<unk>" , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : Tuple , ):
super().__init__(
unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
else:
with open(_lowerCAmelCase , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n' )[:-1]
SCREAMING_SNAKE_CASE_ = [tuple(merge.split()[:2] ) for merge in merges]
SCREAMING_SNAKE_CASE_ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE_ = {}
@property
def lowerCAmelCase_ ( self : List[str] ):
return len(self.decoder )
def lowerCAmelCase_ ( self : Tuple ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
while i < len(_lowerCAmelCase ):
try:
SCREAMING_SNAKE_CASE_ = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ = tuple(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ' '.join(_lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
SCREAMING_SNAKE_CASE_ = '\n' + BPE_TOKEN_MERGES
if word.endswith(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = word.replace(_lowerCAmelCase , '' )
SCREAMING_SNAKE_CASE_ = word.replace(' ' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = word
return word
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ = text.lower()
SCREAMING_SNAKE_CASE_ = text.split()
SCREAMING_SNAKE_CASE_ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(' ' ) ) )
return split_tokens
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = self.decoder.get(_lowerCAmelCase , self.unk_token )
return result
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = ' '.join(_lowerCAmelCase )
# make sure @@ tokens are concatenated
SCREAMING_SNAKE_CASE_ = ''.join(string.split(_lowerCAmelCase ) )
return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 225
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : List[Any] = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
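
# Illustration (added; the module names below are this file's own): with the lazy
# structure above, importing the package is cheap, and the heavy torch/TF modules
# are only imported on first attribute access, e.g.:
#
#   from transformers.models import vit_mae
#   model_cls = vit_mae.ViTMAEModel  # triggers the real import of modeling_vit_mae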
| 367
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    """
    Counts the ways a row of `length` units can be tiled with red (length-2),
    green (length-3) or blue (length-4) tiles, using at least one tile and a
    single colour per tiling, then sums the three colour counts.
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # place the first tile at `tile_start`; the remaining suffix can be
                # tiled recursively, or left empty (the `+ 1`)
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][tile_length - 2] + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f'''{solution() = }''')
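
# Sanity check (added): for a 5-unit row, Project Euler 116's worked example gives
# 7 ways with red (length-2) tiles, 3 with green (length-3) and 2 with blue
# (length-4), so solution(5) should return 12.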
| 114
| 0
|
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
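
# Example invocation (added; the script name and paths are hypothetical):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output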
| 148
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Rescale an image by a scale factor, e.g. 1/255 to map pixel values into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize an image: image = (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        """Preprocess an image or batch of images; per-call arguments override the defaults set in `__init__`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
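
# Usage sketch (added; a minimal example assuming Pillow and numpy are installed):
#
#   import numpy as np
#   from PIL import Image
#
#   processor = CLIPImageProcessor()
#   image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)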
| 148
| 1
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
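
# Quick-start sketch mirroring the integration test above (added; requires the
# TF weights of "facebook/deit-base-distilled-patch16-224" to be downloadable):
#
#   processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   predicted_class = tf.argmax(model(**inputs).logits, axis=-1)[0]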
| 367
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Semantic Stable Diffusion pipelines.

    Args:
        images: list of denoised PIL images (or a numpy array of shape
            `(batch_size, height, width, num_channels)`).
        nsfw_content_detected: per-image flags marking likely NSFW content, or
            `None` if the safety check could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
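
# Example (added): the output container can also be constructed by hand, e.g.
#
#   out = SemanticStableDiffusionPipelineOutput(images=[img], nsfw_content_detected=[False])
#   out.images[0].save("result.png")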
| 8
| 0
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value)

    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None):
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int):
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None):
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node):
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree():
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
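
# Added note: on this unbalanced BST, insert/search/remove run in O(h) where h is
# the tree height (O(n) worst case for sorted input). A small check of the
# inorder-based helper:
#
#   t = BinarySearchTree()
#   t.insert(8, 3, 10, 1, 6)
#   t.find_kth_smallest(2, t.root)  # expected 3, since inorder yields 1, 3, 6, 8, 10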
| 106
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: pooler + dropout + classifier on an intermediate layer."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
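
# Added sketch: the early-exit rule above compares `entropy(logits)` against a
# per-layer threshold. On dummy logits, a peaked distribution exits earlier;
# the threshold value below is hypothetical.
def _demo_early_exit_criterion():
    confident = torch.tensor([[10.0, 0.0]])  # near-zero entropy -> would exit early
    uncertain = torch.tensor([[0.0, 0.0]])  # entropy = log(2) ~ 0.693 -> keeps going
    threshold = 0.5  # hypothetical early_exit_entropy value
    return entropy(confident) < threshold, entropy(uncertain) < threshold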
| 245
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(self, input_tokens, generator=None, num_inference_steps=100, return_dict=True, output_type="numpy", callback=None, callback_steps=1):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
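
# Added sketch: `scale_features` / `scale_to_features` are inverse affine maps
# between the MelGAN log-mel range [log(1e-5), 4.0] and the network range
# [-1, 1]; `pipe` below stands for an instantiated pipeline:
#
#   x = torch.tensor([math.log(1e-5), 0.0, 4.0])
#   y = pipe.scale_features(x, output_range=[-1.0, 1.0])  # endpoints map to -1.0 and 1.0
#   assert torch.allclose(pipe.scale_to_features(y, input_range=[-1.0, 1.0]), x)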
| 357
|
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
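
# Worked example (added): allocation_num(100, 4) partitions 100 bytes as
# ['1-25', '26-50', '51-75', '76-100']; the last range absorbs any remainder.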
| 140
| 0
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the model.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
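
# Example launches (added; the script name is hypothetical and the flags depend
# on your hardware, following the standard `accelerate` CLI):
#
#   python nlp_example.py                                   # single CPU/GPU
#   accelerate launch --num_processes 2 nlp_example.py      # 2-process DDP
#   accelerate launch --mixed_precision fp16 nlp_example.py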
| 5
|
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5
| 1
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 356
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp), with d odd

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 230
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only supports V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted VAE.")

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
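# Example invocation (script name and paths are illustrative):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae-ft-mse.pt \
#       --dump_path ./vae_diffusers
#
# The dump directory can afterwards be loaded back with
# AutoencoderKL.from_pretrained("./vae_diffusers").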
| 121
|
def kth_permutation(k, n):
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
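# Usage sketch: kth_permutation returns the k-th (0-indexed) lexicographic
# permutation of range(n), decoding k in the factorial number system:
#
#   >>> kth_permutation(0, 5)
#   [0, 1, 2, 3, 4]
#   >>> kth_permutation(10, 4)
#   [1, 3, 0, 2]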
| 114
| 0
|
"""simple docstring"""
def permute(nums):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
doctest.testmod()
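# Both functions enumerate all n! orderings: permute rebuilds lists
# recursively, while permute2 swaps in place and snapshots with nums[:].
# The output orders differ, but the sets of permutations agree:
#
#   >>> sorted(permute([1, 2])) == sorted(permute2([1, 2]))
#   True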
| 356
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
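# For the 4-vertex network above the only source-to-sink chain is
# 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the printed maximum flow
# should be 6, bounded by the middle edge. The relabel-to-front selection
# used in PushRelabelExecutor gives an O(V**3) worst-case bound.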
| 211
| 0
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
_UpperCAmelCase = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(_UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
return text, ids
def UpperCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(_UpperCamelCase )
_UpperCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
_UpperCAmelCase = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_UpperCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCamelCase , 'wb' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , 'rb' ) as handle:
_UpperCAmelCase = pickle.load(_UpperCamelCase )
_UpperCAmelCase = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
try:
_UpperCAmelCase = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
try:
_UpperCAmelCase = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
try:
_UpperCAmelCase = MecabTokenizer(
do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(_UpperCamelCase )
_UpperCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
_UpperCAmelCase = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_UpperCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCamelCase , 'wb' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , 'rb' ) as handle:
_UpperCAmelCase = pickle.load(_UpperCamelCase )
_UpperCAmelCase = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(_UpperCamelCase )
_UpperCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
_UpperCAmelCase = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_UpperCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCamelCase , 'wb' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , 'rb' ) as handle:
_UpperCAmelCase = pickle.load(_UpperCamelCase )
_UpperCAmelCase = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_jumanpp
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = JumanppTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = JumanppTokenizer(normalize_text=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = JumanppTokenizer(trim_whitespace=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
_UpperCAmelCase = {}
for i, token in enumerate(_UpperCamelCase ):
_UpperCAmelCase = i
_UpperCAmelCase = WordpieceTokenizer(vocab=_UpperCamelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
_UpperCAmelCase = tokenizer.subword_tokenizer
_UpperCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(_UpperCamelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
_UpperCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(_UpperCamelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
_UpperCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **_UpperCamelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
_UpperCAmelCase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
_UpperCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
_UpperCamelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
_UpperCAmelCase = {}
for i, token in enumerate(_UpperCamelCase ):
_UpperCAmelCase = i
_UpperCAmelCase = CharacterTokenizer(vocab=_UpperCamelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
_UpperCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cl-tohoku/bert-base-japanese'
_UpperCAmelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
class BertTokenizerMismatchTest(unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(_UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
_UpperCAmelCase = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(_UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 39
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
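# Background for this Project Euler 123 style solution: expanding with the
# binomial theorem modulo p_n**2 shows (p_n - 1)**n + (p_n + 1)**n leaves
# remainder 2 for even n and 2 * n * p_n for odd n, which is why n only steps
# through odd values. The problem statement's own example gives a checkpoint:
#
#   >>> solution(10**9)
#   7037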
| 8
| 0
|
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
_UpperCamelCase : Tuple = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="v_prediction" )
_UpperCamelCase : List[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase : Any = self.dummy_model()
_UpperCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase : List[str] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase : List[str] = scheduler.scale_model_input(__a , __a )
_UpperCamelCase : str = model(__a , __a )
_UpperCamelCase : Optional[int] = scheduler.step(__a , __a , __a )
_UpperCamelCase : Tuple = output.prev_sample
_UpperCamelCase : int = torch.sum(torch.abs(__a ) )
_UpperCamelCase : List[Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
if torch_device == "mps":
return
_UpperCamelCase : Tuple = self.scheduler_classes[0]
_UpperCamelCase : int = self.get_scheduler_config()
_UpperCamelCase : str = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase : List[str] = self.dummy_model()
_UpperCamelCase : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
_UpperCamelCase : str = model(__a , __a )
_UpperCamelCase : Any = scheduler.step(__a , __a , __a )
_UpperCamelCase : int = output.prev_sample
_UpperCamelCase : Tuple = torch.sum(torch.abs(__a ) )
_UpperCamelCase : Any = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
if torch_device == "mps":
return
_UpperCamelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCamelCase : List[str] = self.get_scheduler_config()
_UpperCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
_UpperCamelCase : Union[str, Any] = self.dummy_model()
_UpperCamelCase : Dict = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase : Dict = scheduler.scale_model_input(__a , __a )
_UpperCamelCase : Optional[Any] = model(__a , __a )
_UpperCamelCase : Union[str, Any] = scheduler.step(__a , __a , __a )
_UpperCamelCase : Optional[int] = output.prev_sample
_UpperCamelCase : int = torch.sum(torch.abs(__a ) )
_UpperCamelCase : str = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 310
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 1
|
from __future__ import annotations
def merge(input_list, low, mid, high):
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list):
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
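# Example run of the bottom-up merge sort above; doubling the block size p
# merges ever larger sorted runs, for O(n log n) total work without recursion:
#
#   >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
#   [1, 2, 5, 7, 7, 8, 9]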
| 26
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_UpperCAmelCase = """sshleifer/bart-tiny-random"""
_UpperCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return AutoConfig.from_pretrained(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : Tuple = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : int = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : str = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=lowercase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : List[str] = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase ):
create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=lowercase , d=lowercase )
| 140
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
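# Minimal usage sketch (the values below are just the defaults spelled out):
#
#   config = ResNetConfig(depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']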
| 351
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
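# Usage sketch (the feature spec and path are illustrative):
#
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
#   assert len(dataset) == 10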
| 211
| 0
|
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 45
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
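# With this pattern, the heavy torch/tf submodules listed in _import_structure
# are only imported on first attribute access; e.g. `from transformers import
# CTRLModel` resolves through the _LazyModule proxy instead of importing
# everything eagerly at package import time.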
| 230
| 0
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = (
    "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch. In that case, it"
    " requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation"
    " instructions.\n"
)


class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet,"
                " xlm, lxmert, rembert]"
            )
| 66
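# Example CLI call wired up by register_subcommand above (paths are
# illustrative):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin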
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
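# Hedged sketch of the lazy-import pattern used above, independent of
# transformers: a ModuleType proxy placed in sys.modules defers submodule
# imports until first attribute access. All names below are illustrative.
import importlib
import types

class LazyPackage(types.ModuleType):
    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = set(submodules)

    def __getattr__(self, item):
        if item in self._submodules:
            # Imported only now, on first access, keeping package import cheap.
            return importlib.import_module(f"{self.__name__}.{item}")
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")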
| 66
| 1
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class SCREAMING_SNAKE_CASE_ :
pass
| 327
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__(self , A , A , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<pad>" , A="<unk>" , A="m2m100" , A = None , A=8 , **A , ) -> None:
"""simple docstring"""
_a = {} if sp_model_kwargs is None else sp_model_kwargs
_a = language_codes
_a = FAIRSEQ_LANGUAGE_CODES[language_codes]
_a = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
_a = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A )
for lang_code in fairseq_language_code
if self.get_lang_token(A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A , tgt_lang=A , bos_token=A , eos_token=A , sep_token=A , unk_token=A , pad_token=A , language_codes=A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A , **A , )
_a = vocab_file
_a = load_json(A )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(A , self.sp_model_kwargs )
_a = len(self.encoder )
_a = {
self.get_lang_token(A ): self.encoder_size + i for i, lang_code in enumerate(A )
}
_a = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A )}
_a = {v: k for k, v in self.lang_token_to_id.items()}
_a = src_lang if src_lang is not None else '''en'''
_a = tgt_lang
_a = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_a = num_madeup_words
@property
def a__ (self ) -> int:
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def a__ (self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__ (self , A ) -> None:
"""simple docstring"""
_a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a__ (self , A ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A , out_type=A )
def a__ (self , A ) -> Union[str, Any]:
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A , self.encoder[self.unk_token] )
def a__ (self , A ) -> str:
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A , self.unk_token )
def a__ (self , A ) -> Dict:
"""simple docstring"""
_a = []
_a = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
_a = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def a__ (self , A , A = None , A = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
_a = [1] * len(self.prefix_tokens )
_a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A )) + suffix_ones
return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def a__ (self , A , A = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__ (self ) -> Dict:
"""simple docstring"""
_a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Dict:
"""simple docstring"""
_a = self.__dict__.copy()
_a = None
return state
def __setstate__(self , A ) -> None:
"""simple docstring"""
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
_a = Path(A )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
_a = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_a = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , A )
if os.path.abspath(self.spm_file ) != os.path.abspath(A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A )
elif not os.path.isfile(self.spm_file ):
with open(A , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(A )
return (str(A ), str(A ))
def a__ (self , A , A = "en" , A = None , A = "ro" , **A , ) -> BatchEncoding:
"""simple docstring"""
_a = src_lang
_a = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A , A , **A )
def a__ (self , A , A , A , **A ) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_a = src_lang
_a = self(A , add_special_tokens=A , **A )
_a = self.get_lang_id(A )
_a = tgt_lang_id
return inputs
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def a__ (self ) -> Tuple:
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_lang_token(A )
_a = self.lang_token_to_id[lang_token]
_a = [self.cur_lang_id]
_a = [self.eos_token_id]
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_lang_token(A )
_a = self.lang_token_to_id[lang_token]
_a = [self.cur_lang_id]
_a = [self.eos_token_id]
def a__ (self , A ) -> str:
"""simple docstring"""
return self.lang_code_to_token[lang]
def a__ (self , A ) -> int:
"""simple docstring"""
_a = self.get_lang_token(A )
return self.lang_token_to_id[lang_token]
def load_spm(path: str , sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """simple docstring"""
    with open(path , '''r''') as f:
        return json.load(f)
def save_json(data , path: str) -> None:
    """simple docstring"""
    with open(path , '''w''') as f:
        json.dump(data , f , indent=2)
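# Hedged usage sketch for the tokenizer above; it assumes the public
# M2M100Tokenizer API and the "facebook/m2m100_418M" checkpoint from the maps
# at the top of this file.
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained(
        "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
    )
    encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    # generate(..., forced_bos_token_id=tokenizer.get_lang_id("fr")) would then
    # steer an M2M100 model toward French output.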
| 211
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
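# Hedged usage sketch for the exports above; it assumes the public
# "google/electra-small-discriminator" checkpoint.
if __name__ == "__main__":
    import torch
    from transformers import ElectraForPreTraining, ElectraTokenizerFast

    tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    model = ElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
    logits = model(**tokenizer("the quick brown fox", return_tensors="pt")).logits
    print(torch.round(torch.sigmoid(logits)))  # 1.0 marks a token judged as replaced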
| 11
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
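# Hedged usage sketch for the pipeline above; the checkpoint id follows the
# public diffusers LDMSuperResolutionPipeline docs and the input path is
# illustrative.
if __name__ == "__main__":
    import PIL.Image
    from diffusers import LDMSuperResolutionPipeline

    pipe = LDMSuperResolutionPipeline.from_pretrained(
        "CompVis/ldm-super-resolution-4x-openimages"
    )
    low_res = PIL.Image.open("input.png").convert("RGB").resize((128, 128))
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")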
| 11
| 1
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
    def test_betas(self):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1],[0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=A_,beta_end=A_ )
    def test_schedules(self):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A_ )
    def test_prediction_type(self):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
    def test_full_loop_with_v_prediction(self):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
__UpperCamelCase = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__UpperCamelCase = sample.to(A_ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase = scheduler.scale_model_input(A_,A_ )
__UpperCamelCase = model(A_,A_ )
__UpperCamelCase = scheduler.step(A_,A_,A_ )
__UpperCamelCase = output.prev_sample
__UpperCamelCase = torch.sum(torch.abs(A_ ) )
__UpperCamelCase = torch.mean(torch.abs(A_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1E-3
    def test_full_loop_no_noise(self):
'''simple docstring'''
if torch_device == "mps":
return
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__UpperCamelCase = sample.to(A_ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase = scheduler.scale_model_input(A_,A_ )
__UpperCamelCase = model(A_,A_ )
__UpperCamelCase = scheduler.step(A_,A_,A_ )
__UpperCamelCase = output.prev_sample
__UpperCamelCase = torch.sum(torch.abs(A_ ) )
__UpperCamelCase = torch.mean(torch.abs(A_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
    def test_full_loop_device(self):
'''simple docstring'''
if torch_device == "mps":
return
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
scheduler.set_timesteps(self.num_inference_steps,device=A_ )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter.to(A_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__UpperCamelCase = scheduler.scale_model_input(A_,A_ )
__UpperCamelCase = model(A_,A_ )
__UpperCamelCase = scheduler.step(A_,A_,A_ )
__UpperCamelCase = output.prev_sample
__UpperCamelCase = torch.sum(torch.abs(A_ ) )
__UpperCamelCase = torch.mean(torch.abs(A_ ) )
if str(A_ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
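# Hedged sketch of how the scheduler under test is swapped into a pipeline;
# diffusers schedulers share a config, so from_config converts between them.
# The model id is illustrative.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)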
| 310
|
import math
def proth(number: int) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310
| 1
|
'''simple docstring'''
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
def __iter__( self ) -> Any:
'''simple docstring'''
for element in self.data:
yield element
def create_accelerator(even_batches: bool = True ):
    '''simple docstring'''
    accelerator = Accelerator(even_batches=even_batches )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def create_dataloader(accelerator: Accelerator , dataset_size: int , batch_size: int , iterable: bool = False ):
    '''simple docstring'''
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes(
    accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    '''simple docstring'''
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    '''simple docstring'''
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False )
    ddp_model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(ddp_model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator ):
    '''simple docstring'''
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    ddp_model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(ddp_model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    ddp_model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(ddp_model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator()
    ddp_model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(ddp_model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for map-style datasets" in str(w[-1].message )
def main():
    '''simple docstring'''
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled" )
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs" )
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs" )
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning" )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
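# Hedged usage note: the assertions above require exactly two processes; with
# the real accelerate CLI this script would be launched roughly as
#
#   accelerate launch --num_processes 2 <this_script>.py
#
# (the script filename is illustrative).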
| 357
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ) -> None:
'''simple docstring'''
snake_case_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
snake_case_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case_ = len(self.sp_model ) - 1
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , a__ , a__ = None , a__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , a__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a__ , out_type=a__ )
def lowerCAmelCase__ ( self , a__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(a__ )
return spm_id if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(a__ )
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = []
snake_case_ = ""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(a__ )
snake_case_ = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __getstate__( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self , a__ ) -> str:
'''simple docstring'''
snake_case_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
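# Hedged usage sketch for the tokenizer above; it assumes the public
# "moussaKam/barthez" checkpoint listed in the maps at the top of this file.
if __name__ == "__main__":
    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    ids = tokenizer("Bonjour tout le monde.").input_ids  # includes <s> ... </s>
    print(tokenizer.convert_ids_to_tokens(ids))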
| 92
| 0
|
import argparse
import datetime
def zeller(date_input: str ) -> str:
    days = {
        '''0''': '''Sunday''',
        '''1''': '''Monday''',
        '''2''': '''Tuesday''',
        '''3''': '''Wednesday''',
        '''4''': '''Thursday''',
        '''5''': '''Friday''',
        '''6''': '''Saturday''',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('''Must be 10 characters long''' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''' )
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''' )
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
    # Response
    response = f"Your date {date_input}, is a {days[str(f )]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
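# Hedged sanity check for the reconstruction above (guarded so it only runs as
# a script): January 1, 2000 fell on a Saturday, and zeller() also
# self-validates against datetime.date.weekday().
if __name__ == "__main__":
    assert zeller("01-01-2000").endswith("Saturday!")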
| 338
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'gptj'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self , vocab_size=50_400 , n_positions=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast):
'''simple docstring'''
def __init__(self , A , A = "default" , A = None , A = False , ) -> List[str]:
"""simple docstring"""
super().__init__(A , task=A , patching_specs=A , use_past=A )
if not getattr(self._config , '''pad_token_id''' , A ):
# TODO: how to do that better?
_a = 0
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
    def num_layers(self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
    def num_attention_heads(self ) -> int:
"""simple docstring"""
return self._config.n_head
    def generate_dummy_inputs(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(GPTJOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset(self ) -> int:
"""simple docstring"""
return 13
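# Hedged usage sketch for the ONNX config above; the checkpoint id is the one
# referenced in the archive map, and the call pattern follows the
# transformers.onnx conventions as I understand them.
if __name__ == "__main__":
    from transformers import AutoConfig

    config = AutoConfig.from_pretrained("EleutherAI/gpt-j-6B")
    onnx_config = GPTJOnnxConfig(config, task="default")
    print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes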
| 211
| 0
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution(chain_length: int = 60 , number_limit: int = 1000000 ) -> int:
    """simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('''Parameters chain_length and number_limit must be int''' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 357
|
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int ) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[57] = True
CHAINS[0] = False
def chain(number: int ) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000 ) -> int:
    """simple docstring"""
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(True )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6
| 0
|
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int ) -> int:
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result
def solution(max_power: int = 9 ):
    '''simple docstring'''
    result = 0
    for length in range(1, max_power + 1 ):
        result += reversible_numbers(length, 0, [0] * length, length )
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 66
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args ):
    '''simple docstring'''
    parameter_file = os.path.join(args.tf_model_dir, """parameters.json""" )
    params = json.loads(open(parameter_file ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
        args.output = args.output + """.pt"""
    new_state = OrderedDict()
with tf.device("""/CPU:0""" ):
snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ :str = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
snake_case_ :Any = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
snake_case_ :Optional[int] = 8
snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :List[str] = torch.tensor(_lowercase )
elif key_name.startswith("""model/moe""" ):
snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/softmlp/kernel""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
snake_case_ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
snake_case_ :Dict = key_name[-9:-7]
for i in range(16 ):
snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
snake_case_ :Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/mlp""" ):
snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p1/bias""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
snake_case_ :str = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/bias""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
snake_case_ :Any = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/ln""" ):
snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :int = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.startswith("""model/att""" ):
snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ :Dict = state[:, 0, :, :]
snake_case_ :int = state[:, 1, :, :]
snake_case_ :List[str] = state[:, 2, :, :]
snake_case_ :str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
snake_case_ :int = torch.tensor(_lowercase )
snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
snake_case_ :Dict = torch.tensor(_lowercase )
snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/o/kernel""" ):
snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
snake_case_ :str = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = torch.tensor(_lowercase )
elif key_name.startswith("""model/an""" ):
snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player
snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
if key_name.startswith("""model/wte""" ):
snake_case_ :Tuple = """lm_head.weight"""
snake_case_ :List[str] = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
elif key_name.startswith("""model/wob""" ):
snake_case_ :str = """final_logits_bias"""
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = state.reshape((1, -1) )
snake_case_ :Union[str, Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
snake_case_ :str = """model.last_project.weight"""
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
snake_case_ :Optional[int] = """model.last_project.bias"""
snake_case_ :Tuple = vnp.copy() # same because it is one dimensional
snake_case_ :Any = torch.tensor(_lowercase )
    torch.save(new_state, args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 66
| 1
|
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list , low: int , mid: int , high: int ) -> list:
    '''simple docstring'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list ) -> list:
    '''simple docstring'''
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(''',''')]
print(iter_merge_sort(unsorted))
| 358
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
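# Hedged usage sketch (not part of the original file; needs network access to
# the Hugging Face Hub). It exercises the special-token logic defined above.
if __name__ == "__main__":
    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    enc = tok("Hello world", "Second sentence")
    # ids are laid out as [CLS] seq0 [SEP] seq1 [SEP], matching
    # build_inputs_with_special_tokens above
    print(enc["input_ids"])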
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" MBART tokenizer, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
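# Hedged usage sketch (not part of the original file; needs network access to
# the Hugging Face Hub and the tokenizers backend).
if __name__ == "__main__":
    tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tok("UN Chief says there is no military solution in Syria")
    # the suffix tokens are [</s>, en_XX], appended by set_src_lang_special_tokens
    print(batch["input_ids"])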
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base class for speech feature extractors that pad/truncate sequence features."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, convert it to a dict of lists so that this
        # method can be used as a collate_fn in a PyTorch DataLoader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
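# Minimal numpy sketch (illustrative addition, not part of the original file)
# of the right-sided padding `_pad` applies to one 1-D feature sequence:
if __name__ == "__main__":
    seq = np.array([0.1, 0.2, 0.3])
    difference = 5 - len(seq)  # pad up to max_length = 5
    print(np.pad(seq, (0, difference), "constant", constant_values=0.0))  # [0.1 0.2 0.3 0.  0. ]
    print(np.pad(np.ones(len(seq), dtype=np.int32), (0, difference)))  # [1 1 1 0 0]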
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
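# Hedged usage sketch (not part of the original file; needs network access).
# Text and image inputs produce input_ids plus (conditional_)pixel_values.
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    inputs = processor(text=["a cat"], images=Image.new("RGB", (352, 352)), return_tensors="pt")
    print(inputs.keys())  # input_ids, attention_mask, pixel_values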
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
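# Hedged invocation sketch (hypothetical local paths and script name, not part
# of the original file):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model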
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with fixed priorities 0 (highest), 1 and 2; FIFO within each priority."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Add an element to the queue of the given priority."""
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Return the first element of the highest-priority non-empty queue."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue in which the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Add an element to the queue."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the smallest element in the queue."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of function = 0 closest to starting_point via Newton-Raphson."""
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
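    # Quick sanity check (hedged addition, not in the original demos):
    # the positive root of x**2 - 4 = 0 is 2
    print(f"The root of x**2 - 4 = 0 is {newton_raphson('x**2 - 4', 3)}")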
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Legacy __init__ that maps deprecated `no_*` arguments to their positive form."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
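# Hedged usage sketch (not part of the original file; requires TensorFlow).
# Resolving `.strategy` picks a TPU, single-GPU or CPU distribution strategy.
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(
        models=["distilbert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
    )
    print(args.strategy)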
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
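# Hedged usage sketch (not part of the original file; assumes the imported
# IIRFilter exposes a per-sample `process` method, as in audio_filters):
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    print(lowpass.process(0.5))  # run one sample through the biquad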
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
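# Hedged usage sketch (not part of the original file; needs network access to
# the Hugging Face Hub).
if __name__ == "__main__":
    from PIL import Image

    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    encoding = processor(images=Image.new("RGB", (288, 288)), text="a photo", return_tensors="pt")
    print(encoding.keys())  # input_ids, attention_mask, pixel_values, pixel_mask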
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# Load configuration defined in the metadata file
with open(_snake_case ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Dict = json.load(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = LukeConfig(use_entity_aware_attention=_snake_case ,**metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : int = torch.load(_snake_case ,map_location="""cpu""" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : Dict = load_entity_vocab(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : int = AddedToken("""<ent>""" ,lstrip=_snake_case ,rstrip=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken("""<ent2>""" ,lstrip=_snake_case ,rstrip=_snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(_snake_case )
with open(os.path.join(_snake_case ,LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
json.dump(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : int = LukeTokenizer.from_pretrained(_snake_case )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict["""embeddings.word_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : List[str] = f'''encoder.layer.{layer_index}.attention.self.'''
SCREAMING_SNAKE_CASE__ : int = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[str] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any = state_dict["""entity_embeddings.entity_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ : Dict = entity_emb[entity_vocab["""[MASK]"""]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = LukeModel(config=_snake_case ).eval()
    missing_keys, unexpected_keys = model.load_state_dict(_snake_case ,strict=_snake_case )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'''Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
# Check outputs
SCREAMING_SNAKE_CASE__ : Dict = LukeTokenizer.from_pretrained(_snake_case ,task="""entity_classification""" )
SCREAMING_SNAKE_CASE__ : str = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (39, 42)
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(_snake_case ,entity_spans=[span] ,add_prefix_space=_snake_case ,return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Any = model(**_snake_case )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ : Any = torch.Size((1, 42, 1_024) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 42, 768) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_snake_case ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 1, 1_024) )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,_snake_case ,atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_snake_case ) )
model.save_pretrained(_snake_case )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path ,"""r""" ,encoding="""utf-8""" ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split("""\t""" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
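# Hedged invocation sketch (hypothetical local paths and script name, not part
# of the original file):
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./luke_base/pytorch_model.bin \
#       --metadata_path ./luke_base/metadata.json \
#       --entity_vocab_path ./luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted_luke \
#       --model_size base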
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    """Image processor that resizes to multiples of size_divisor and rescales to [0, 1]."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
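# Hedged usage sketch (not part of the original file): spatial dimensions are
# rounded down to multiples of size_divisor, e.g. (250, 333) -> (224, 320).
if __name__ == "__main__":
    image = np.zeros((250, 333, 3), dtype=np.uint8)
    processor = GLPNImageProcessor(size_divisor=32)
    outputs = processor(image, return_tensors="np")
    print(outputs["pixel_values"].shape)  # expected (1, 3, 224, 320)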
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
POKER_HANDS = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and compute the expected outcome."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate `number_of_hands` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))
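# Illustrative aside (not part of the original tests): the branch-free outcome
# lookup used in generate_random_hand. (a >= b) + (a > b) evaluates to 0 when
# a < b, 1 when a == b, and 2 when a > b, indexing into ["Loss", "Tie", "Win"].
assert ["Loss", "Tie", "Win"][(3 >= 5) + (3 > 5)] == "Loss"
assert ["Loss", "Tie", "Win"][(4 >= 4) + (4 > 4)] == "Tie"
assert ["Loss", "Tie", "Win"][(5 >= 3) + (5 > 3)] == "Win"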
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    """Problem 54 of Project Euler: count how many hands Player 1 wins."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
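# Hedged usage sketch of the PokerHand API exercised above (assumes sol1's
# PokerHand constructor takes a space-separated five-card string, as the tests do):
# hand = PokerHand("2H 3H 4H 5H 6H")    # straight flush
# other = PokerHand("KS AS TS QS JS")   # royal flush
# hand.compare_with(other)              # -> "Loss" (first row of TEST_COMPARE)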
| 356
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Flatten the original JSON-lines entity vocab into a name -> id mapping."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
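# Hedged sample of the JSON-lines entity vocab this parser expects (field names
# come from the code above; the concrete rows are invented for illustration):
# {"id": 0, "entities": [["[MASK]", "en"]]}
# {"id": 1, "entities": [["Japan", "en"], ["Япония", "ru"]]}
# Every (name, language) alias of an entity maps to the same id under "language:name".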
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
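# Example invocation (script name assumed; all paths are placeholders):
# python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./mluke/pytorch_model.bin \
#     --metadata_path ./mluke/metadata.json \
#     --entity_vocab_path ./mluke/entity_vocab.jsonl \
#     --pytorch_dump_folder_path ./mluke-base-converted \
#     --model_size base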
| 326
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 230
|
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: one linear projection from hidden states to class logits."""

    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
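# Minimal usage sketch (shapes are illustrative):
# import torch
# head = ClassificationHead(class_size=4, embed_size=768)
# logits = head(torch.randn(2, 768))   # -> torch.Size([2, 4])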
| 230
| 1
|
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
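# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.
# For the Project Euler input, solution(100) == 648.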
| 358
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name: str) -> str:
    """Map an original MAE checkpoint key to the transformers ViTMAE naming scheme."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    """Split fused qkv weights into query/key/value and rename all remaining keys."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an original MAE checkpoint, convert it, and verify one forward pass."""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
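# Example invocation (the URL is this script's default; the script name and
# output path are placeholders):
# python convert_vit_mae_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#     --pytorch_dump_folder_path ./vit-mae-base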
| 231
| 0
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that builds a datasets.Dataset from a pyspark.sql.DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
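# Hedged usage sketch (requires a live SparkSession; the DataFrame contents are invented):
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.master("local[*]").getOrCreate()
# df = spark.createDataFrame([("hello",), ("world",)], ["text"])
# ds = SparkDatasetReader(df, cache_dir="./spark_cache").read()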
| 34
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Pure-Python SHA-256 (educational; prefer hashlib for real workloads)."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data to a 64-byte boundary, appending the 8-byte bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """Run the SHA-256 compression function over every 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
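# Spot-check against the standard FIPS 180-2 "abc" test vector (runs at import
# time; comment out if undesired):
assert SHA256(b"abc").hash == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"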
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Hash a string or a file's contents from the command line."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 213
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
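# Minimal usage sketch: every argument is optional and defaults to the base-model values.
# config = LukeConfig(entity_vocab_size=10000, use_entity_aware_attention=False)
# config.model_type  # -> "luke"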
| 251
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
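# Hedged usage sketch (downloads the default checkpoint on first use; the example
# sentence and labels are invented):
# tool = TextClassificationTool()
# tool("This is a super nice API!", labels=["positive", "negative"])  # -> "positive"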
| 251
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
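# Minimal composition sketch: build an AltCLIPConfig from explicit sub-configs
# (the small dimensions are invented for illustration):
# text_cfg = AltCLIPTextConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
# vision_cfg = AltCLIPVisionConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
# cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)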
| 283
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
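# Note (illustrative): with this _LazyModule pattern, importing the package stays cheap;
# e.g. `from transformers.models.squeezebert import SqueezeBertModel` only triggers the
# torch-backed module import on first attribute access.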
| 283
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self._nlp, []
for entry in self._reader:
UpperCAmelCase__ : Dict = nlp(**_A ) if self._reader.is_multi_columns else nlp(_A )
if isinstance(_A , _A ):
outputs.append(_A )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
UpperCAmelCase__ : Any = self._reader.save_binary(_A )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(_A )
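
# A hypothetical invocation of the command above through the transformers CLI
# (the file names are placeholders, not from the original):
#
#   transformers-cli run --task sentiment-analysis --input reviews.csv --output scored.csv --format csv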
| 299
|
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order entropy, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in the text."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
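
# The functions above compute the Shannon entropy H = -sum(p * log2(p)) over single
# characters and over character pairs. A hypothetical quick check (the exact numbers
# printed depend on the input string):
#
#   calculate_prob("the quick brown fox jumps over the lazy dog")
#   # prints three rounded values: H1, H2, and H2 - H1 (bits)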
| 299
| 1
|
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Return the list of edges (i, j) that are saturated once the maximum flow is reached."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
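
# `mincut` above runs the Edmonds-Karp max-flow loop (BFS augmenting paths) but only
# returns the saturated edges. A hypothetical helper (the name `mincut_value` is ours,
# not from the original) that reuses `bfs` to recover the flow value itself:
def mincut_value(graph, source, sink):
    graph = [row[:] for row in graph]  # work on a copy so the caller's graph survives
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # bottleneck capacity along the augmenting path found by bfs
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # push the flow along the path and update residual capacities
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow

# For the classic CLRS network above, mincut_value(test_graph, 0, 5) returns 23.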
| 28
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name

    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
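
# A hypothetical sketch of how a caller might use this registry (not the actual
# Trainer code): pick the default backend and verify it is installed before running.
#
#   backend_name = default_hp_search_backend()
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()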
| 326
| 0
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCAmelCase = """pytorch_model.bin"""
UpperCAmelCase = """pytorch_model.bin.index.json"""
UpperCAmelCase = """adapter_config.json"""
UpperCAmelCase = """adapter_model.bin"""
UpperCAmelCase = """adapter_model.safetensors"""
UpperCAmelCase = """tf_model.h5"""
UpperCAmelCase = """tf_model.h5.index.json"""
UpperCAmelCase = """model.ckpt"""
UpperCAmelCase = """flax_model.msgpack"""
UpperCAmelCase = """flax_model.msgpack.index.json"""
UpperCAmelCase = """model.safetensors"""
UpperCAmelCase = """model.safetensors.index.json"""
UpperCAmelCase = """config.json"""
UpperCAmelCase = """preprocessor_config.json"""
UpperCAmelCase = FEATURE_EXTRACTOR_NAME
UpperCAmelCase = """generation_config.json"""
UpperCAmelCase = """modelcard.json"""
UpperCAmelCase = """▁"""
UpperCAmelCase = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCAmelCase = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCAmelCase = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCAmelCase = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Raise ImportError if the installed transformers version is older than min_version."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
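
# A hypothetical use at the top of an example script (the version string is a placeholder):
#
#   from transformers.utils import check_min_version
#   check_min_version("4.30.0.dev0")  # raises ImportError when transformers is too old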
| 353
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 267
| 0
|