| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest ( SchedulerCommonTest ):
scheduler_classes = (DDPMScheduler,)
def get_scheduler_config( self , **kwargs ):
'''simple docstring'''
config ={
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**kwargs )
return config
def test_timesteps( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_betas( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def test_schedules( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def test_variance_type( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance )
def test_clip_sample( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def test_thresholding( self ):
'''simple docstring'''
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def test_prediction_type( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_time_indices( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=t )
def test_variance( self ):
'''simple docstring'''
scheduler_class =self.scheduler_classes[0]
scheduler_config =self.get_scheduler_config()
scheduler =scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def test_full_loop_no_noise( self ):
'''simple docstring'''
scheduler_class =self.scheduler_classes[0]
scheduler_config =self.get_scheduler_config()
scheduler =scheduler_class(**scheduler_config )
num_trained_timesteps =len(scheduler )
model =self.dummy_model()
sample =self.dummy_sample_deter
generator =torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual =model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample =scheduler.step(residual , t , sample , generator=generator ).prev_sample
# if t > 0:
#     noise = self.dummy_sample_deter
#     variance = scheduler.get_variance(t) ** (0.5) * noise
#
#     sample = pred_prev_sample + variance
sample =pred_prev_sample
result_sum =torch.sum(torch.abs(sample ) )
result_mean =torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def test_full_loop_with_v_prediction( self ):
'''simple docstring'''
scheduler_class =self.scheduler_classes[0]
scheduler_config =self.get_scheduler_config(prediction_type='''v_prediction''' )
scheduler =scheduler_class(**scheduler_config )
num_trained_timesteps =len(scheduler )
model =self.dummy_model()
sample =self.dummy_sample_deter
generator =torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual =model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample =scheduler.step(residual , t , sample , generator=generator ).prev_sample
# if t > 0:
#     noise = self.dummy_sample_deter
#     variance = scheduler.get_variance(t) ** (0.5) * noise
#
#     sample = pred_prev_sample + variance
sample =pred_prev_sample
result_sum =torch.sum(torch.abs(sample ) )
result_mean =torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def test_custom_timesteps( self ):
'''simple docstring'''
scheduler_class =self.scheduler_classes[0]
scheduler_config =self.get_scheduler_config()
scheduler =scheduler_class(**scheduler_config )
timesteps =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps )
scheduler_timesteps =scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps ):
if i == len(scheduler_timesteps ) - 1:
expected_prev_t =-1
else:
expected_prev_t =timesteps[i + 1]
prev_t =scheduler.previous_timestep(timestep )
prev_t =prev_t.item()
self.assertEqual(prev_t , expected_prev_t )
def test_custom_timesteps_increasing_order( self ):
'''simple docstring'''
scheduler_class =self.scheduler_classes[0]
scheduler_config =self.get_scheduler_config()
scheduler =scheduler_class(**scheduler_config )
timesteps =[100, 87, 50, 51, 0]
with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=timesteps )
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
'''simple docstring'''
scheduler_class =self.scheduler_classes[0]
scheduler_config =self.get_scheduler_config()
scheduler =scheduler_class(**scheduler_config )
timesteps =[100, 87, 50, 1, 0]
num_inference_steps =len(timesteps )
with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def test_custom_timesteps_too_large( self ):
'''simple docstring'''
scheduler_class =self.scheduler_classes[0]
scheduler_config =self.get_scheduler_config()
scheduler =scheduler_class(**scheduler_config )
timesteps =[scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=timesteps )
| 88 |
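For orientation, a minimal denoising-loop sketch against the public DDPMScheduler API that the tests above exercise. The tensor shapes, step count, and random noise are illustrative assumptions; the noise stands in for a real model's predicted residual.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)  # hypothetical short inference schedule
sample = torch.randn(1, 3, 8, 8)  # dummy noisy sample
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # stand-in for a UNet's output
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # one reverse-diffusion step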
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT :
def __init__( self , poly_a=None , poly_b=None ):
'''simple docstring'''
# Input as list
self.polyA =list(poly_a or [0] )[:]
self.polyB =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
self.len_A =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
self.len_B =len(self.polyB )
# Add 0 to make lengths equal a power of 2
self.c_max_length =int(
2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
self.root =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
self.product =self.__multiply()
def __dft( self , which ):
'''simple docstring'''
dft =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(dft ) <= 1:
return dft[0]
#
next_ncol =self.c_max_length // 2
while next_ncol > 0:
new_dft =[[] for i in range(next_ncol )]
root =self.root**next_ncol
# First half of next step
current_root =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
current_root =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
dft =new_dft
next_ncol =next_ncol // 2
return dft[0]
def __multiply( self ):
'''simple docstring'''
dft_a =self.__dft('''A''' )
dft_b =self.__dft('''B''' )
inverce_c =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
next_ncol =2
while next_ncol <= self.c_max_length:
new_inverse_c =[[] for i in range(next_ncol )]
root =self.root ** (next_ncol // 2)
current_root =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
inverce_c =new_inverse_c
next_ncol *= 2
# Unpack
inverce_c =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
'''simple docstring'''
a ='''A = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
b ='''B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
c ='''A*B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
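As a quick sanity check on the FFT-based multiply above, direct convolution of the coefficient lists gives the same product; the polynomials here are arbitrary examples.

import numpy as np

a = [1, 2, 3]  # 1 + 2x + 3x^2
b = [4, 5]     # 4 + 5x
print(np.convolve(a, b))  # [ 4 13 22 15], i.e. 4 + 13x + 22x^2 + 15x^3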
'''simple docstring'''
import math
def decimal_to_octal( num: int ) -> str:
octal =0
counter =0
while num > 0:
remainder =num % 8
octal =octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
num =math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f'''0o{int(octal )}'''
def main( ) -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 88 |
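Python's built-in oct() performs the same conversion and can serve as a reference for decimal_to_octal above:

for n in (2, 8, 65, 216, 512):
    print(oct(n))  # 0o2, 0o10, 0o101, 0o330, 0o1000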
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_plbart"""] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 | 1 |
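The lazy-module pattern above keeps importing transformers cheap: a submodule is only loaded when one of its attributes is first touched. A hedged usage sketch, assuming a transformers installation that ships PLBart:

from transformers import PLBartConfig  # resolved on first access via _LazyModule

config = PLBartConfig()  # default configuration object
print(config.model_type)  # 'plbart'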
'''simple docstring'''
import math
def real_power( apparent_power: float , power_factor: float ) -> float:
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def reactive_power( apparent_power: float , power_factor: float ) -> float:
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
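A worked example for the two functions above, with assumed inputs of 100 VA apparent power at a 0.8 power factor:

print(real_power(100, 0.8))      # 80.0 W, since 100 * 0.8
print(reactive_power(100, 0.8))  # ~60.0 VAR, since sqrt(1 - 0.8**2) = 0.6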
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig ( PretrainedConfig ):
model_type = 'vision-encoder-decoder'
is_composition = True
def __init__( self , **kwargs ):
'''simple docstring'''
super().__init__(**kwargs )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuration of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
encoder_config =kwargs.pop('''encoder''' )
encoder_model_type =encoder_config.pop('''model_type''' )
decoder_config =kwargs.pop('''decoder''' )
decoder_model_type =decoder_config.pop('''model_type''' )
self.encoder =AutoConfig.for_model(encoder_model_type , **encoder_config )
self.decoder =AutoConfig.for_model(decoder_model_type , **decoder_config )
self.is_encoder_decoder =True
@classmethod
def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
decoder_config.is_decoder =True
decoder_config.add_cross_attention =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
def to_dict( self ):
'''simple docstring'''
output =copy.deepcopy(self.__dict__ )
output['''encoder'''] =self.encoder.to_dict()
output['''decoder'''] =self.decoder.to_dict()
output['''model_type'''] =self.__class__.model_type
return output
class VisionEncoderDecoderEncoderOnnxConfig ( OnnxConfig ):
torch_onnx_minimum_version = version.parse('1.11' )
@property
def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def atol_for_validation( self ):
'''simple docstring'''
return 1E-4
@property
def outputs( self ):
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class VisionEncoderDecoderDecoderOnnxConfig ( OnnxConfig ):
@property
def inputs( self ):
'''simple docstring'''
common_inputs =OrderedDict()
common_inputs['''input_ids'''] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
common_inputs['''attention_mask'''] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
common_inputs['''encoder_hidden_states'''] ={0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
lowercase : Optional[Any] =OrderedDict()
lowercase : List[Any] =super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
lowercase , lowercase : Optional[int] =dummy_input['''input_ids'''].shape
lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase : List[str] =dummy_input.pop('''input_ids''' )
lowercase : Tuple =dummy_input.pop('''attention_mask''' )
lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ )
return common_inputs
class VisionEncoderDecoderOnnxConfig ( OnnxConfig ):
@property
def inputs( self ):
'''simple docstring'''
pass
def get_encoder_config( self , encoder_config: PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
def get_decoder_config( self , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , feature: str = "default" ):
'''simple docstring'''
decoder_config.encoder_hidden_size =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 88 | 1 |
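A minimal sketch of how the composite config above is typically assembled, assuming a ViT encoder and a BERT decoder from transformers:

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.model_type)  # 'vision-encoder-decoder'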
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_bloom"""] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
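The try/except blocks above are the standard optional-dependency guard; stripped of the transformers helpers, the idea reduces to a sketch like this:

try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    print("torch backend enabled")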
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args( ):
parser =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
args =parser.parse_args()
return args
def initialize_tpu( args ):
try:
if args.tpu_name:
tpu =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
tpu =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(tpu )
tf.tpu.experimental.initialize_tpu_system(tpu )
return tpu
def count_samples( file_list ):
num_samples =0
for file in file_list:
filename =file.split('''/''' )[-1]
sample_count =re.search(R'''-\d+-(\d+)\.tfrecord''' , filename ).group(1 )
sample_count =int(sample_count )
num_samples += sample_count
return num_samples
def prepare_dataset( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
num_samples =count_samples(records )
dataset =tf.data.Dataset.from_tensor_slices(records )
if shuffle:
dataset =dataset.shuffle(len(dataset ) )
dataset =tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
dataset =dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
dataset =dataset.map(decode_fn , num_parallel_calls=AUTO )
if shuffle:
assert shuffle_buffer_size is not None
dataset =dataset.shuffle(shuffle_buffer_size )
dataset =dataset.batch(batch_size , drop_remainder=True )
dataset =dataset.map(mask_fn , num_parallel_calls=AUTO )
dataset =dataset.prefetch(AUTO )
return dataset
def main( args ):
if not args.no_tpu:
tpu =initialize_tpu(args )
strategy =tf.distribute.TPUStrategy(tpu )
else:
strategy =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
tokenizer =AutoTokenizer.from_pretrained(args.tokenizer )
config =AutoConfig.from_pretrained(args.pretrained_model_config )
config.vocab_size =tokenizer.vocab_size
training_records =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
eval_records =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
num_train_samples =count_samples(training_records )
steps_per_epoch =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
total_train_steps =steps_per_epoch * args.num_epochs
with strategy.scope():
model =TFAutoModelForMaskedLM.from_config(config )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
optimizer , schedule =create_optimizer(
num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
def decode_fn(example ):
features ={
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(example , features )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
data_collator =DataCollatorForLanguageModeling(
tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
def mask_with_collator(batch ):
# TF really needs an isin() function
special_tokens_mask =(
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
batch['''input_ids'''] , batch['''labels'''] =data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
return batch
batch_size =args.per_replica_batch_size * strategy.num_replicas_in_sync
train_dataset =prepare_dataset(
training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
eval_dataset =prepare_dataset(
eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
callbacks =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
model.fit(
train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
args = parse_args()
main(args)
| 88 | 1 |
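count_samples() above relies on a shard-naming convention in which each file encodes its own record count; a small illustration with made-up shard names:

import re

files = ["train-00000-2048.tfrecord", "train-00001-1024.tfrecord"]  # hypothetical shards
total = sum(int(re.search(r"-\d+-(\d+)\.tfrecord", f).group(1)) for f in files)
print(total)  # 3072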
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
def __init__( self , parent , ):
'''simple docstring'''
self.parent =parent
self.batch_size =13
self.seq_length =7
self.mem_len =30
self.key_length =self.seq_length + self.mem_len
self.clamp_len =15
self.is_training =True
self.use_labels =True
self.vocab_size =99
self.cutoffs =[10, 50, 80]
self.hidden_size =32
self.d_embed =32
self.num_attention_heads =4
self.d_head =8
self.d_inner =128
self.div_val =2
self.num_hidden_layers =2
self.scope =None
self.seed =1
self.eos_token_id =0
self.num_labels =3
self.pad_token_id =self.vocab_size - 1
self.init_range =0.01
def prepare_config_and_inputs( self ):
'''simple docstring'''
input_ids_1 =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_ids_2 =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lm_labels =None
if self.use_labels:
lm_labels =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config =TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_1, input_ids_2, lm_labels)
def set_seed( self ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
'''simple docstring'''
model =TFTransfoXLModel(config )
hidden_states_1 , mems_1 =model(input_ids_1 ).to_tuple()
inputs ={'''input_ids''': input_ids_2, '''mems''': mems_1}
hidden_states_2 , mems_2 =model(inputs ).to_tuple()
self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
'''simple docstring'''
model =TFTransfoXLLMHeadModel(config )
lm_logits_1 , mems_1 =model(input_ids_1 ).to_tuple()
inputs ={'''input_ids''': input_ids_1, '''labels''': lm_labels}
lm_logits_1 , mems_1 =model(inputs ).to_tuple()
lm_logits_2 , mems_2 =model([input_ids_2, mems_1] ).to_tuple()
inputs ={'''input_ids''': input_ids_2, '''mems''': mems_1, '''labels''': lm_labels}
lm_logits_2 , mems_2 =model(inputs ).to_tuple()
self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
'''simple docstring'''
model =TFTransfoXLForSequenceClassification(config )
result =model(input_ids_1 )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
(config, input_ids_1, input_ids_2, lm_labels) =config_and_inputs
inputs_dict ={'''input_ids''': input_ids_1}
return config, inputs_dict
@require_tf
class TFTransfoXLModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
all_generative_model_classes = () if is_tf_available() else ()
pipeline_model_mapping = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
test_resize_embeddings = False
test_head_masking = False
test_onnx = False
test_mismatched_shapes = False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def setUp( self ):
'''simple docstring'''
self.model_tester =TFTransfoXLModelTester(self )
self.config_tester =ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_transfo_xl_model( self ):
'''simple docstring'''
self.model_tester.set_seed()
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
def test_transfo_xl_lm_head( self ):
'''simple docstring'''
self.model_tester.set_seed()
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
def test_transfo_xl_sequence_classification_model( self ):
'''simple docstring'''
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
def test_model_common_attributes( self ):
'''simple docstring'''
config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
list_other_models_with_output_ebd =[TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
model =model_class(config )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
x =model.get_output_embeddings()
assert isinstance(x , tf.keras.layers.Layer )
name =model.get_bias()
assert name is None
else:
x =model.get_output_embeddings()
assert x is None
name =model.get_bias()
assert name is None
def test_xla_mode( self ):
'''simple docstring'''
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def test_model_from_pretrained( self ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model =TFTransfoXLModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def test_dataset_conversion( self ):
'''simple docstring'''
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def test_lm_generate_transfo_xl_wt103( self ):
'''simple docstring'''
model =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
input_ids =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
expected_output_ids =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
output_ids =model.generate(input_ids , max_length=200 , do_sample=False )
self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 88 |
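The tester above leans on ids_tensor from the shared test utilities; a rough sketch of what such a helper does (an assumption, not the upstream implementation):

import random
import tensorflow as tf

def random_ids(shape, vocab_size):
    # Build a tensor of the requested shape filled with random token ids.
    total = 1
    for dim in shape:
        total *= dim
    values = [random.randrange(vocab_size) for _ in range(total)]
    return tf.constant(values, shape=shape, dtype=tf.int32)

print(random_ids([2, 3], vocab_size=99))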
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
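The "3" exported above silences TensorFlow's C++ logging if set before TensorFlow is imported; lower values show progressively more output:

import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # 0=all, 1=hide INFO, 2=also WARNING, 3=also ERROR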
'''simple docstring'''
def solution( limit: int = 1000000 ) -> int:
primes =set(range(3 , limit , 2 ) )
primes.add(2 )
for p in range(3 , limit , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , limit , p ) ) )
phi =[float(n ) for n in range(limit + 1 )]
for p in primes:
for n in range(p , limit + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
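A small-limit check of solution() above: the totients phi(2)..phi(8) are 1, 2, 2, 4, 2, 6, 4, which sum to 21.

print(solution(8))  # expected 21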
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/xglm-564M""": 2048,
}
class XGLMTokenizer ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
'''simple docstring'''
self.sp_model_kwargs ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
self.num_madeup_words =7
madeup_words =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
kwargs['''additional_special_tokens'''] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(vocab_file ) )
self.vocab_file =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset =1
# Mimic fairseq token-to-id alignment for the first 4 token
self.fairseq_tokens_to_ids ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
sp_size =len(self.sp_model )
madeup_words ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(madeup_words )
self.fairseq_ids_to_tokens ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
state =self.__dict__.copy()
state['''sp_model'''] =None
state['''sp_model_proto'''] =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , d ):
'''simple docstring'''
self.__dict__ =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
self.sp_model_kwargs ={}
self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_1 is None:
return [self.sep_token_id] + token_ids_0
sep =[self.sep_token_id]
return sep + token_ids_0 + sep + sep + token_ids_1
def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 ))
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
'''simple docstring'''
sep =[self.sep_token_id]
if token_ids_1 is None:
return len(sep + token_ids_0 ) * [0]
return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
@property
def vocab_size( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def get_vocab( self ):
'''simple docstring'''
vocab ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _tokenize( self , text: str ):
'''simple docstring'''
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id =self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token( self , index ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def convert_tokens_to_string( self , tokens ):
'''simple docstring'''
out_string =''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file =os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , '''wb''' ) as fi:
content_spiece_model =self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
| 88 | 1 |
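A hedged usage sketch for the tokenizer above; this downloads the real sentencepiece model from the Hub on first use.

from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
print(tokenizer.tokenize("Hello world"))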
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""ChineseCLIPFeatureExtractor"""]
UpperCamelCase_ = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_chinese_clip"""] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
parameter_file =os.path.join(args.tf_model_dir , '''parameters.json''' )
params =json.loads(open(parameter_file ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
args.output =args.output + '''.pt'''
new_state =OrderedDict()
with tf.device('''/CPU:0''' ):
reader =tf.train.load_checkpoint(args.tf_model_dir )
shapes =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
vnp =reader.get_tensor(key_name ).astype(np.float16 )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
player =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
player =8
name ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
state =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
new_state[name] =torch.tensor(state )
elif key_name.startswith('''model/moe''' ):
player =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
name ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
state =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
new_state[name] =torch.tensor(state )
elif key_name.endswith('''/softmlp/kernel''' ):
name ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
state =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
new_state[name] =torch.tensor(state )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
nlayer =key_name[-9:-7]
for i in range(16 ):
name ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
state =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
new_state[name] =torch.tensor(state )
elif key_name.startswith('''model/mlp''' ):
player =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
name ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
state =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
new_state[name] =torch.tensor(state )
elif key_name.endswith('''/p1/bias''' ):
name ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
state =vnp.copy() # same because it is one dimensional
new_state[name] =torch.tensor(state )
elif key_name.endswith('''/p2/kernel''' ):
name ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
state =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
new_state[name] =torch.tensor(state )
elif key_name.endswith('''/p2/bias''' ):
name ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
state =vnp.copy() # same because it is one dimensional
new_state[name] =torch.tensor(state )
elif key_name.startswith('''model/ln''' ):
player =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
name ='''model.blocks.%d.feed_forward.norm.bias''' % player
state =vnp.copy() # same because it is one dimensional
new_state[name] =torch.tensor(state )
elif key_name.endswith('''/g''' ):
name ='''model.blocks.%d.feed_forward.norm.weight''' % player
state =vnp.copy() # same because it is one dimensional
new_state[name] =torch.tensor(state )
elif key_name.startswith('''model/att''' ):
player =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
state =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
state_q =state[:, 0, :, :]
state_k =state[:, 1, :, :]
state_v =state[:, 2, :, :]
state_q =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
state_k =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
state_v =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
name ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
new_state[name] =torch.tensor(state_q )
name ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
new_state[name] =torch.tensor(state_k )
name ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
new_state[name] =torch.tensor(state_v )
elif key_name.endswith('''/o/kernel''' ):
name ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
state =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
new_state[name] =torch.tensor(state )
elif key_name.startswith('''model/an''' ):
player =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
name ='''model.blocks.%d.self_attn.norm.bias''' % player
state =vnp.copy() # same because it is one dimensional
new_state[name] =torch.tensor(state )
elif key_name.endswith('''/g''' ):
name ='''model.blocks.%d.self_attn.norm.weight''' % player
state =vnp.copy() # same because it is one dimensional
new_state[name] =torch.tensor(state )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
nlayer ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
name ='''model.%s.weight''' % nlayer
state =vnp.copy() # same in embedded
new_state[name] =torch.tensor(state )
if key_name.startswith('''model/wte''' ):
name ='''lm_head.weight'''
state =vnp.copy() # same in embedded
new_state[name] =torch.tensor(state )
elif key_name.startswith('''model/wob''' ):
name ='''final_logits_bias'''
state =vnp.copy() # same in embedded
state =state.reshape((1, -1) )
new_state[name] =torch.tensor(state )
elif key_name == "model/dense/kernel":
name ='''model.last_project.weight'''
state =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
new_state[name] =torch.tensor(state )
elif key_name == "model/dense_1/bias":
name ='''model.last_project.bias'''
state =vnp.copy() # same because it is one dimensional
new_state[name] =torch.tensor(state )
torch.save(new_state , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 88 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = XLNetTokenizer
lowerCamelCase_ = XLNetTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : int =XLNetTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : str ='''<s>'''
lowercase : List[str] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Tuple =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1006 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =XLNetTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] )
lowercase : Union[str, Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : List[str] =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
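# Note (added): the 0 ids above are the fixture vocabulary's <unk>; decoding below
# therefore maps the out-of-vocabulary pieces '9' and 'é' back to '<unk>'.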
lowercase : Dict =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =XLNetTokenizer(UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ )
lowercase : Any =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Union[str, Any] =XLNetTokenizer(UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict =XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
lowercase : List[Any] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : List[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# fmt: off
lowercase : List[str] ={'''input_ids''': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
| 88 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = BigBirdTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Any =self.get_rust_tokenizer()
lowercase : int ='''I was born in 92000, and this is falsé.'''
lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ )
lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_rust_tokenizer()
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''Hello World!'''
lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : Dict =''' '''.join(UpperCAmelCase__ )
lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' )
lowercase : Dict =BigBirdModel(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 88 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'data2vec-text'
def __init__( self : str , UpperCAmelCase__ : Any=30522 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : Tuple=3072 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : str=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]="absolute" , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[Any] =vocab_size
lowercase : Union[str, Any] =hidden_size
lowercase : List[Any] =num_hidden_layers
lowercase : List[Any] =num_attention_heads
lowercase : List[Any] =hidden_act
lowercase : Optional[int] =intermediate_size
lowercase : Dict =hidden_dropout_prob
lowercase : int =attention_probs_dropout_prob
lowercase : Any =max_position_embeddings
lowercase : Any =type_vocab_size
lowercase : List[Any] =initializer_range
lowercase : Dict =layer_norm_eps
lowercase : Optional[int] =position_embedding_type
lowercase : List[Any] =use_cache
lowercase : Any =classifier_dropout
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase : Dict ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Union[str, Any] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
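# Usage sketch (added; upstream transformers names these classes Data2VecTextConfig
# and Data2VecTextOnnxConfig):
#   config = Data2VecTextConfig(vocab_size=30522)
#   onnx_config = Data2VecTextOnnxConfig(config, task="multiple-choice")
#   print(onnx_config.inputs)  # input_ids / attention_mask with dynamic batch, choice and sequence axes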
| 88 |
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the current row of Pascal's triangle from the previous row, in place
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
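# Cross-check (added): the in-place row update computes "n choose r" in O(r) space;
# it should agree with the standard library.
import math
assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252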
| 88 | 1 |
'''simple docstring'''
import numpy as np
import datasets
UpperCamelCase_ = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
UpperCamelCase_ = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
UpperCamelCase_ = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
# convert to numpy arrays
lowercase : Optional[int] =np.array(UpperCAmelCase__ )
lowercase : Dict =np.array(UpperCAmelCase__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
lowercase : Optional[int] =X - np.mean(UpperCAmelCase__ )
lowercase : str =np.cov(reference_distribution.T )
try:
lowercase : Union[str, Any] =np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
lowercase : Optional[int] =np.linalg.pinv(UpperCAmelCase__ )
lowercase : Any =np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =np.dot(UpperCAmelCase__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 88 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character, increment its count for the first string and decrement it
    # for the second; the counts cancel out exactly when the strings are anagrams.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
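# Shorter alternative (added): two strings are anagrams exactly when their character
# multisets match, which collections.Counter expresses directly.
from collections import Counter

def check_anagrams_counter(first: str, second: str) -> bool:
    first = first.lower().replace(" ", "")
    second = second.lower().replace(" ", "")
    return Counter(first) == Counter(second)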
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 88 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['image_processor', 'tokenizer']
lowerCamelCase_ = 'BlipImageProcessor'
lowerCamelCase_ = 'AutoTokenizer'
def __init__( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
# add QFormer tokenizer
lowercase : Optional[int] =qformer_tokenizer
def __call__( self : Union[str, Any] , UpperCAmelCase__ : ImageInput = None , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
lowercase : List[str] =BatchFeature()
if text is not None:
lowercase : str =self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
encoding.update(UpperCAmelCase__ )
lowercase : Optional[int] =self.qformer_tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowercase : Any =qformer_text_encoding.pop('''input_ids''' )
lowercase : Optional[Any] =qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
lowercase : Union[str, Any] =self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
encoding.update(UpperCAmelCase__ )
return encoding
def lowerCamelCase_ ( self : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : str , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer.model_input_names
lowercase : Union[str, Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : int , **UpperCAmelCase__ : str ):
'''simple docstring'''
if os.path.isfile(UpperCAmelCase__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
lowercase : Tuple =os.path.join(UpperCAmelCase__ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase__ )
return super().save_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def lowerCamelCase_ ( cls : Optional[int] , UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Tuple =AutoTokenizer.from_pretrained(UpperCAmelCase__ , subfolder='''qformer_tokenizer''' )
lowercase : int =cls._get_arguments_from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
args.append(UpperCAmelCase__ )
return cls(*UpperCAmelCase__ )
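# Usage sketch (added; upstream transformers exports this class as InstructBlipProcessor,
# and "Salesforce/instructblip-flan-t5-xl" is one published checkpoint):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
#   # inputs carries pixel_values, input_ids/attention_mask for the language tokenizer,
#   # and qformer_input_ids/qformer_attention_mask for the Q-Former tokenizer.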
| 88 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
class __SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , UpperCAmelCase__ : int = 0 ):
'''simple docstring'''
lowercase : Optional[Any] =key
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int ):
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Dict =key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(UpperCAmelCase__ ) ^ key ) for ch in content]
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int ):
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Tuple =key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(UpperCAmelCase__ ) ^ key ) for ch in content]
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 0 ):
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
lowercase : Tuple =''''''
for ch in content:
ans += chr(ord(UpperCAmelCase__ ) ^ key )
return ans
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 0 ):
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Dict =key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
lowercase : List[Any] =''''''
for ch in content:
ans += chr(ord(UpperCAmelCase__ ) ^ key )
return ans
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 0 ):
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
try:
with open(UpperCAmelCase__ ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(UpperCAmelCase__ , UpperCAmelCase__ ) )
except OSError:
return False
return True
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int ):
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
try:
with open(UpperCAmelCase__ ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(UpperCAmelCase__ , UpperCAmelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 88 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * math.sqrt(1 - power_factor**2)
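# Worked example (added; from the power triangle S**2 = P**2 + Q**2):
#   real_power(100, 0.9)     -> 90.0
#   reactive_power(100, 0.9) -> 100 * math.sqrt(1 - 0.81), about 43.589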
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = CustomTokenizer
pass
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
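# Behavioral note (added): the deprecated class subclasses CLIPImageProcessor and only
# adds the warning above, so existing call sites keep working while they migrate:
#   feature_extractor = CLIPFeatureExtractor()  # warns, then behaves like CLIPImageProcessor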
| 88 | 1 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = (CMStochasticIterativeScheduler,)
lowerCamelCase_ = 10
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] ={
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**UpperCAmelCase__ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : str =10
lowercase : Dict =self.get_scheduler_config()
lowercase : Union[str, Any] =self.scheduler_classes[0](**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
lowercase : Optional[int] =scheduler.timesteps[0]
lowercase : List[Any] =scheduler.timesteps[1]
lowercase : List[Any] =self.dummy_sample
lowercase : Tuple =0.1 * sample
lowercase : Union[str, Any] =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
lowercase : Optional[int] =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Any =self.scheduler_classes[0]
lowercase : Dict =self.get_scheduler_config()
lowercase : Optional[Any] =scheduler_class(**UpperCAmelCase__ )
lowercase : Optional[int] =1
scheduler.set_timesteps(UpperCAmelCase__ )
lowercase : Optional[int] =scheduler.timesteps
lowercase : Optional[int] =torch.manual_seed(0 )
lowercase : Union[str, Any] =self.dummy_model()
lowercase : Any =self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCAmelCase__ ):
# 1. scale model input
lowercase : Tuple =scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
lowercase : Optional[Any] =model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
lowercase : Tuple =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : List[Any] =pred_prev_sample
lowercase : int =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowercase : List[str] =torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
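# The three numbered steps above are the generic diffusers sampling loop; as a
# standalone sketch (model, shape and step count are hypothetical):
#   scheduler.set_timesteps(num_inference_steps)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample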
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =self.scheduler_classes[0]
lowercase : Optional[Any] =self.get_scheduler_config()
lowercase : str =scheduler_class(**UpperCAmelCase__ )
lowercase : Union[str, Any] =[106, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
lowercase : Any =scheduler.timesteps
lowercase : Dict =torch.manual_seed(0 )
lowercase : Union[str, Any] =self.dummy_model()
lowercase : Tuple =self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase : Optional[Any] =scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
lowercase : str =model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
lowercase : Tuple =scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Dict =pred_prev_sample
lowercase : str =torch.sum(torch.abs(UpperCAmelCase__ ) )
lowercase : int =torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[str] =self.scheduler_classes[0]
lowercase : Dict =self.get_scheduler_config()
lowercase : str =scheduler_class(**UpperCAmelCase__ )
lowercase : Any =[39, 30, 12, 15, 0]
with self.assertRaises(UpperCAmelCase__ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : List[Any] =self.scheduler_classes[0]
lowercase : Dict =self.get_scheduler_config()
lowercase : Dict =scheduler_class(**UpperCAmelCase__ )
lowercase : Optional[Any] =[39, 30, 12, 1, 0]
lowercase : str =len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.scheduler_classes[0]
lowercase : Union[str, Any] =self.get_scheduler_config()
lowercase : Optional[Any] =scheduler_class(**UpperCAmelCase__ )
lowercase : Optional[Any] =[scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase__ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
| 88 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
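# Follow-up sketch (added; the student class and config are hypothetical): the saved
# dict can seed a smaller student before distillation, since the selected teacher
# layers were written out under consecutive student indices above.
#   student = RobertaForMaskedLM(student_config)  # e.g. a 6-layer config
#   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)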
| 88 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt ( __magic_name__ : str ) -> None:
lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase : List[str] =json.loads(open(__magic_name__ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
lowercase : Tuple =args.output + '''.pt'''
lowercase : int =OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir )
lowercase : int =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase : int =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase : Union[str, Any] =8
lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/moe''' ):
lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase : Union[str, Any] =key_name[-9:-7]
for i in range(16 ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase : Any =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/mlp''' ):
lowercase : Dict =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Any =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p1/bias''' ):
lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/kernel''' ):
lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/bias''' ):
lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/ln''' ):
lowercase : int =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/att''' ):
lowercase : int =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Dict =state[:, 0, :, :]
lowercase : Tuple =state[:, 1, :, :]
lowercase : List[Any] =state[:, 2, :, :]
lowercase : Optional[int] =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase : Dict =torch.tensor(__magic_name__ )
lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase : Tuple =torch.tensor(__magic_name__ )
elif key_name.endswith('''/o/kernel''' ):
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase : List[Any] =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # flatten the head dimensions, then transpose to PyTorch (out, in) layout
lowercase : str =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/an''' ):
lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # copied unchanged: the norm bias is one-dimensional
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player
lowercase : Any =vnp.copy() # copied unchanged: the norm weight is one-dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase : Optional[Any] ='''model.%s.weight''' % nlayer
lowercase : Optional[int] =vnp.copy() # embedding tables are copied unchanged
lowercase : List[Any] =torch.tensor(__magic_name__ )
if key_name.startswith('''model/wte''' ):
lowercase : Tuple ='''lm_head.weight'''
lowercase : str =vnp.copy() # the LM head is tied to the input embeddings, so the same array is copied
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/wob''' ):
lowercase : List[str] ='''final_logits_bias'''
lowercase : Dict =vnp.copy() # copied unchanged
lowercase : Tuple =state.reshape((1, -1) ) # reshaped to (1, vocab_size)
lowercase : Dict =torch.tensor(__magic_name__ )
elif key_name == "model/dense/kernel":
lowercase : Dict ='''model.last_project.weight'''
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name == "model/dense_1/bias":
lowercase : List[Any] ='''model.last_project.bias'''
lowercase : str =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
torch.save(__magic_name__ , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 88 |
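A minimal sketch of inspecting the checkpoint written by torch.save in the converter above; the path is a placeholder, not taken from the source.

import torch

# Load the converted checkpoint produced by the script above (placeholder path).
state_dict = torch.load("./gptsan_pytorch_model.bin", map_location="cpu")
# Keys follow the "model.blocks.%d...." naming assembled during conversion.
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))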
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a Colab notebook or CPU instead if you're facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(False ) # hide the axes when displaying the image
fig.axes.get_yaxis().set_visible(False )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88 | 1 |
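A short sketch of how the helpers above compose (parameter freezing, device selection, timestamping); the linear model is a stand-in used only for illustration.

import torch
from datetime import datetime

device = "cuda" if torch.cuda.is_available() else "cpu"  # same fallback as the device helper
model = torch.nn.Linear(4, 2).to(device)
for param in model.parameters():
    param.requires_grad = False  # freezing, as in the first helper
print(f"[{datetime.now().strftime('%H:%M:%S')}] frozen model ready on {device}")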
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[int] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 88 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Tuple =HfArgumentParser(TensorFlowBenchmarkArguments )
lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0]
lowercase : Any =TensorFlowBenchmark(args=benchmark_args )
try:
lowercase : List[Any] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any =''' '''.join(str(e ).split(''' ''' )[:-1] )
lowercase : Optional[Any] =''''''
lowercase : List[str] =eval(str(e ).split(''' ''' )[-1] )
lowercase : Optional[Any] =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(arg )
if len(__magic_name__ ) > 0:
lowercase : int =full_error_msg + begin_error_msg + str(wrong_args )
raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 88 | 1 |
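An illustrative invocation of the benchmark entry point above; the script name and flag values are placeholders, with flags drawn from the standard benchmark argument dataclass.

# python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128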
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : bool = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(angle ), magnitude * sin(angle )]
return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def _lowerCAmelCase ( __magic_name__ : NDArray[floataa] , __magic_name__ : NDArray[floataa] , __magic_name__ : float = 10**-1 ) -> bool:
lowercase : NDArray[floataa] =cross(location , forces )
lowercase : float =sum(moments )
return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCamelCase_ = array(
[
polar_force(7_18.4, 180 - 30),
polar_force(8_79.54, 45),
polar_force(100, -90),
]
)
UpperCamelCase_ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCamelCase_ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
UpperCamelCase_ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCamelCase_ = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
UpperCamelCase_ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 88 |
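A hand-checked sketch of the polar decomposition and equilibrium test above: two equal and opposite forces at the same point must balance. Values are illustrative.

from numpy import array, cos, cross, radians, sin

fx = 10 * cos(radians(60))  # x-component of a 10 N force at 60 degrees, ~5.0
fy = 10 * sin(radians(60))  # y-component, ~8.66
forces = array([[fx, fy], [-fx, -fy]])  # equal and opposite forces
location = array([[0, 0], [0, 0]])      # both applied at the origin
moments = cross(location, forces)       # per-force moments about the origin
print(abs(sum(moments)) < 10**-1)       # True: the system is in static equilibrium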
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool:
lowercase : str =len(__magic_name__ )
# We need to create a solution object to save the path.
lowercase : int =[[0 for _ in range(size )] for _ in range(size )]
lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , solutions )
if solved:
print('''\n'''.join(str(row ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool:
lowercase : Optional[int] =len(__magic_name__ )
# Final check point.
if i == j == (size - 1):
lowercase : Optional[int] =1
return True
lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds
lowercase : Tuple =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowercase : Union[str, Any] =1
# check for directions
if (
run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ )
or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ )
):
return True
lowercase : str =0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
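A small usage sketch for the backtracking solver above, assuming the two helpers carry their upstream names solve_maze and run_maze (0 marks an open cell, 1 a wall).

maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 0, 0],
]
# Prints a 0/1 grid marking the path from (0, 0) to (2, 2), or a failure message.
solved = solve_maze(maze)
assert solved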
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCamelCase_ = False
@skip_mps
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionAttendAndExcitePipeline
lowerCamelCase_ = False
lowerCamelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase_ ( cls : Any ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def lowerCamelCase_ ( cls : Optional[int] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Union[str, Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , )
lowercase : List[str] =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowercase : List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
lowercase : int =CLIPTextModel(UpperCAmelCase__ )
lowercase : List[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : Optional[Any] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int]=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : List[str] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : Dict =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Optional[int] ={
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : int ='''cpu'''
lowercase : Tuple =self.get_dummy_components()
lowercase : Optional[Any] =self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : int =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Union[str, Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowercase : Optional[Any] =np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowercase : List[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def lowerCamelCase_ ( cls : Tuple ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict =torch.manual_seed(51 )
lowercase : Any =StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
lowercase : Dict ='''a painting of an elephant with glasses'''
lowercase : Optional[int] =[5, 7]
lowercase : Union[str, Any] =pipe(
prompt=UpperCAmelCase__ , token_indices=UpperCAmelCase__ , guidance_scale=7.5 , generator=UpperCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
lowercase : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 88 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Any =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , int ):
lowercase : Optional[int] =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase : str =randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Dict =self.unet(image , t ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : Dict =self.scheduler.step(
model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] =self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
| 88 | 1 |
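A hedged usage sketch for the unconditional DDIM pipeline above; the checkpoint name is illustrative.

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # illustrative checkpoint
# eta=0.0 gives deterministic DDIM sampling; higher eta adds DDPM-style variance.
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")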
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionPanoramaPipeline
lowerCamelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase : Any =DDIMScheduler()
torch.manual_seed(0 )
lowercase : int =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : int =CLIPTextModel(UpperCAmelCase__ )
lowercase : int =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : Tuple ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any=0 ):
'''simple docstring'''
lowercase : Tuple =torch.manual_seed(UpperCAmelCase__ )
lowercase : List[Any] ={
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] =self.get_dummy_components()
lowercase : Dict =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : List[str] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : int =np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : int =self.get_dummy_components()
lowercase : List[Any] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : Tuple =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : str ='''french fries'''
lowercase : str =sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
lowercase : Dict =output.images
lowercase : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Optional[Any] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] =self.get_dummy_components()
lowercase : Optional[Any] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : Tuple =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =sd_pipe(**UpperCAmelCase__ , view_batch_size=2 )
lowercase : Dict =output.images
lowercase : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : List[str] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str =self.get_dummy_components()
lowercase : str =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowercase : Union[str, Any] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : List[Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Optional[int] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Tuple =np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] =self.get_dummy_components()
lowercase : Tuple =PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , skip_prk_steps=UpperCAmelCase__ )
lowercase : str =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : List[Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : str =np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int=0 ):
'''simple docstring'''
lowercase : Union[str, Any] =torch.manual_seed(UpperCAmelCase__ )
lowercase : int ={
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int ='''stabilityai/stable-diffusion-2-base'''
lowercase : List[Any] =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : List[Any] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[int] =self.get_inputs()
lowercase : Union[str, Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Tuple =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase : Any =np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=UpperCAmelCase__ )
lowercase : Tuple =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[int] =self.get_inputs()
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Dict =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase : Any =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Optional[int] =0
def callback_fn(UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor ) -> None:
lowercase : Optional[int] =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase : Optional[Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase : Any =latents[0, -3:, -3:, -1]
lowercase : int =np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase : int =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase : List[str] =latents[0, -3:, -3:, -1]
lowercase : Any =np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase : Optional[int] =False
lowercase : Any ='''stabilityai/stable-diffusion-2-base'''
lowercase : str =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : List[Any] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
lowercase : List[Any] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : List[Any] =self.get_inputs()
pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Optional[int] ='''stabilityai/stable-diffusion-2-base'''
lowercase : int =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : Union[str, Any] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
lowercase : List[str] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : str =self.get_inputs()
lowercase : Any =pipe(**UpperCAmelCase__ )
lowercase : Optional[Any] =torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 88 |
'''simple docstring'''
import argparse
import copy
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
lowercase : int ={}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase : List[str] =[]
_list.append([line.split()[1], line.split()[2]] )
lowercase : Tuple =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
lowercase : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str:
with open(__magic_name__ ) as f:
lowercase : Optional[int] =f.read(1 )
lowercase : List[Any] =start_node
lowercase : List[Any] =[]
lowercase : str =start_node
lowercase : str =0
while visiting not in first_solution:
lowercase : Optional[int] =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(minim ) and k[0] not in first_solution:
lowercase : List[Any] =k[1]
lowercase : str =k[0]
first_solution.append(visiting )
lowercase : Any =distance_of_first_solution + int(minim )
lowercase : Optional[int] =best_node
first_solution.append(end_node )
lowercase : str =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase : str =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple:
lowercase : Tuple =[]
for n in solution[1:-1]:
lowercase : Dict =solution.index(n )
for kn in solution[1:-1]:
lowercase : Tuple =solution.index(kn )
if n == kn:
continue
lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ )
lowercase : Optional[int] =kn
lowercase : List[Any] =n
lowercase : List[Any] =0
for k in _tmp[:-1]:
lowercase : Optional[int] =_tmp[_tmp.index(k ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase : Optional[int] =distance + int(i[1] )
_tmp.append(distance )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]:
lowercase : str =1
lowercase : List[Any] =first_solution
lowercase : Any =[]
lowercase : str =distance_of_first_solution
lowercase : str =solution
while count <= iters:
lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ )
lowercase : Dict =0
lowercase : int =neighborhood[index_of_best_solution]
lowercase : Optional[int] =len(__magic_name__ ) - 1
lowercase : List[Any] =False
while not found:
lowercase : List[Any] =0
while i < len(best_solution ):
if best_solution[i] != solution[i]:
lowercase : List[str] =best_solution[i]
lowercase : Dict =solution[i]
break
lowercase : Any =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase : str =True
lowercase : int =best_solution[:-1]
lowercase : Any =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase : Optional[int] =cost
lowercase : str =solution
else:
lowercase : Optional[int] =index_of_best_solution + 1
lowercase : List[Any] =neighborhood[index_of_best_solution]
if len(tabu_list ) >= size:
tabu_list.pop(0 )
lowercase : Optional[int] =count + 1
return best_solution_ever, best_cost
def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple:
lowercase : List[str] =generate_neighbours(args.File )
lowercase , lowercase : Optional[Any] =generate_first_solution(
args.File , dict_of_neighbours )
lowercase , lowercase : int =tabu_search(
first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 88 | 1 |
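A sketch of the input the tabu search above expects: one edge per line as "node node distance", with the file's first character read as the start node. The file contents, filename, and CLI values are illustrative.

sample = """a b 20
a c 18
b c 10
b d 22
c d 25
a d 30
"""
with open("tsp_data.txt", "w") as f:
    f.write(sample)
# python tabu_search.py -f tsp_data.txt -i 100 -s 5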
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
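A brief sketch of the lazy-import behavior the _LazyModule above provides: importing the package itself is cheap, and heavy submodules load on first attribute access (names mirror the import structure built here).

import transformers.models.mvp as mvp  # cheap: torch is not imported yet
tokenizer_cls = mvp.MvpTokenizer       # resolved lazily via _LazyModule.__getattr__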
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 1000000 ) -> int:
lowercase : Dict =set(range(3 , __magic_name__ , 2 ) )
primes.add(2 )
for p in range(3 , __magic_name__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , __magic_name__ , p ) ) )
lowercase : List[Any] =[float(__magic_name__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(p , limit + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 | 1 |
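A hand check of the totient product formula the sieve above applies, phi(n) = n * prod(1 - 1/p) over the distinct primes p dividing n.

# For n = 12 (primes 2 and 3): 12 * (1 - 1/2) * (1 - 1/3) = 4, matching the
# four reduced proper fractions with denominator 12: 1/12, 5/12, 7/12, 11/12.
assert round(12 * (1 - 1 / 2) * (1 - 1 / 3)) == 4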
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'speech_to_text_2'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[Any]=10000 , UpperCAmelCase__ : Tuple=6 , UpperCAmelCase__ : List[str]=2048 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple=1024 , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int =vocab_size
lowercase : int =d_model
lowercase : str =decoder_ffn_dim
lowercase : List[Any] =decoder_layers
lowercase : Optional[Any] =decoder_attention_heads
lowercase : Union[str, Any] =dropout
lowercase : Union[str, Any] =attention_dropout
lowercase : Dict =activation_dropout
lowercase : Union[str, Any] =activation_function
lowercase : Any =init_std
lowercase : Tuple =decoder_layerdrop
lowercase : List[str] =use_cache
lowercase : int =decoder_layers
lowercase : Optional[Any] =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase : List[str] =max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 88 |
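An illustrative instantiation of the decoder config above (upstream name Speech2Text2Config); the attribute_map makes hidden_size an alias of d_model.

from transformers import Speech2Text2Config

config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)
print(config.hidden_size == config.d_model)  # True via the attribute_map alias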
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 88 | 1 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : List[str] =question_encoder
lowercase : Tuple =generator
lowercase : Any =self.question_encoder
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
if os.path.isfile(UpperCAmelCase__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
lowercase : Any =os.path.join(UpperCAmelCase__ , '''question_encoder_tokenizer''' )
lowercase : Union[str, Any] =os.path.join(UpperCAmelCase__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(UpperCAmelCase__ )
self.generator.save_pretrained(UpperCAmelCase__ )
@classmethod
def lowerCamelCase_ ( cls : Tuple , UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowercase : Optional[Any] =kwargs.pop('''config''' , UpperCAmelCase__ )
if config is None:
lowercase : Dict =RagConfig.from_pretrained(UpperCAmelCase__ )
lowercase : Tuple =AutoTokenizer.from_pretrained(
UpperCAmelCase__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
lowercase : List[str] =AutoTokenizer.from_pretrained(
UpperCAmelCase__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=UpperCAmelCase__ , generator=UpperCAmelCase__ )
def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
return self.current_tokenizer(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return self.generator.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
return self.generator.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] =self.question_encoder
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Union[str, Any] =self.generator
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "longest" , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , FutureWarning , )
if max_length is None:
lowercase : int =self.current_tokenizer.model_max_length
lowercase : List[str] =self(
UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowercase : Any =self.current_tokenizer.model_max_length
lowercase : int =self(
text_target=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowercase : Dict =labels['''input_ids''']
return model_inputs
| 88 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : List[str] =True
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ )
lowercase : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'The column name of the images in the files.'} )
lowerCamelCase_ = field(default=lowercase__ , metadata={'help': 'A folder containing the training data.'} )
lowerCamelCase_ = field(default=lowercase__ , metadata={'help': 'A folder containing the validation data.'} )
lowerCamelCase_ = field(
default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : int ={}
if self.train_dir is not None:
lowercase : str =self.train_dir
if self.validation_dir is not None:
lowercase : str =self.validation_dir
lowercase : Any =data_files if data_files else None
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCamelCase_ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase_ = field(default=lowercase__ , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase_ = field(
default=0.7_5 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def _lowerCAmelCase ( __magic_name__ : int ) -> Optional[Any]:
lowercase : Dict =torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _lowerCAmelCase ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase : Optional[Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase : Tuple =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase : Union[str, Any] =training_args.get_process_log_level()
logger.setLevel(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint =None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds =load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split =None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split =ds['''train'''].train_test_split(data_args.train_val_split )
        ds['''train'''] =split['''train''']
        ds['''validation'''] =split['''test''']
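    # Example (illustrative): passing --train_val_split 0.05 on a dataset with no
    # "validation" key holds out 5% of the train split as validation.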
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs ={
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config =ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config =ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor =ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model =ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model =ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names =ds['''train'''].column_names
    else:
        column_names =ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name =data_args.image_column_name
    elif "image" in column_names:
        image_column_name ='''image'''
    elif "img" in column_names:
        image_column_name ='''img'''
    else:
        image_column_name =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase : List[str] =image_processor.size['''shortest_edge''']
else:
lowercase : Union[str, Any] =(image_processor.size['''height'''], image_processor.size['''width'''])
lowercase : Union[str, Any] =Compose(
[
Lambda(lambda __magic_name__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__magic_name__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__magic_name__ : int ):
lowercase : str =[transforms(__magic_name__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
            ds['''train'''] =ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
            ds['''validation'''] =(
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size =(
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate =training_args.base_learning_rate * total_train_batch_size / 256
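    # Worked example (illustrative numbers): with a per-device batch size of 64,
    # gradient accumulation of 2, and 4 processes, total_train_batch_size is 512,
    # so a base learning rate of 1e-3 scales to 1e-3 * 512 / 256 = 2e-3.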
# Initialize our trainer
    trainer =Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint =None
        if training_args.resume_from_checkpoint is not None:
            checkpoint =training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint =last_checkpoint
        train_result =trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics =trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
    # Write model card and (optionally) push to hub
    kwargs ={
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index: int ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 88 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT :
    def __init__( self : Union[str, Any] , poly_a : List[Any]=None , poly_b : Optional[Any]=None ):
        '''simple docstring'''
        # Input as list
        self.polyA =list(poly_a or [0] )[:]
        self.polyB =list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A =len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B =len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length =int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root of unity used for the Fourier transform
        self.root =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product =self.__multiply()
    def __dft( self : Union[str, Any] , which : Tuple ):
        '''simple docstring'''
        dft =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol =self.c_max_length // 2
        while next_ncol > 0:
            new_dft =[[] for i in range(next_ncol )]
            root =self.root**next_ncol
            # First half of next step
            current_root =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft =new_dft
            next_ncol =next_ncol // 2
        return dft[0]
    def __multiply( self : List[Any] ):
        '''simple docstring'''
        dft_a =self.__dft('''A''' )
        dft_b =self.__dft('''B''' )
        inverse_c =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverse_c[0] ) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol =2
        while next_ncol <= self.c_max_length:
            new_inverse_c =[[] for i in range(next_ncol )]
            root =self.root ** (next_ncol // 2)
            current_root =1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverse_c =new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverse_c]
        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c
    def __str__( self : Any ):
        '''simple docstring'''
        a ='''A = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b ='''B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c ='''A*B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
        return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
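    # A minimal usage sketch (assumption: coefficient lists are ordered from
    # lowest to highest degree, matching the constructor above):
    # (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2
    print(FFT(poly_a=[1, 2] , poly_b=[3, 4] ))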
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self : Optional[int] , parent : Tuple , batch_size : Optional[Any]=13 , seq_length : Tuple=7 , is_training : Union[str, Any]=True , use_labels : Dict=False , vocab_size : Optional[Any]=99 , hidden_size : Dict=32 , num_hidden_layers : Dict=2 , num_attention_heads : Union[str, Any]=4 , intermediate_size : Optional[Any]=37 , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : Dict=0.1 , max_position_embeddings : Optional[int]=20 , eos_token_id : Optional[Any]=2 , pad_token_id : Dict=1 , bos_token_id : Tuple=0 , ):
        '''simple docstring'''
        self.parent =parent
        self.batch_size =batch_size
        self.seq_length =seq_length
        self.is_training =is_training
        self.use_labels =use_labels
        self.vocab_size =vocab_size
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.intermediate_size =intermediate_size
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.eos_token_id =eos_token_id
        self.pad_token_id =pad_token_id
        self.bos_token_id =bos_token_id
    def prepare_config_and_inputs_for_common( self : Optional[Any] ):
        '''simple docstring'''
        input_ids =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids =tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config =self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict =prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self : Optional[Any] , config : List[Any] , inputs_dict : List[Any] ):
        '''simple docstring'''
        model =TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids =inputs_dict['''input_ids''']
        input_ids =input_ids[:1, :]
        attention_mask =inputs_dict['''attention_mask'''][:1, :]
        head_mask =inputs_dict['''head_mask''']
        self.batch_size =1
        # first forward pass
        outputs =model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values =outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens =ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids =tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past =model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past =model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice =output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict( config : List[str] , input_ids : Optional[Any] , decoder_input_ids : str , attention_mask : Optional[int]=None , decoder_attention_mask : Tuple=None , head_mask : int=None , decoder_head_mask : Any=None , cross_attn_head_mask : List[Any]=None , ) -> Optional[Any]:
    if attention_mask is None:
        attention_mask =tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask =tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
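# Example (illustrative): for a (13, 7) batch of input_ids with pad_token_id=1,
# the helper above derives an attention_mask of the same shape and all-ones
# head masks of shape (num_layers, num_heads).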
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
lowerCamelCase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase_ = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
    def setUp( self : List[Any] ):
        '''simple docstring'''
        self.model_tester =TFBlenderbotSmallModelTester(self )
        self.config_tester =ConfigTester(self , config_class=BlenderbotSmallConfig )
    def test_config( self : Any ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self : Dict ):
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    src_text = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
    model_name = 'facebook/blenderbot_small-90M'
    @cached_property
    def tokenizer( self : Dict ):
        '''simple docstring'''
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    @cached_property
    def model( self : Optional[Any] ):
        '''simple docstring'''
        model =TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_90_generation_from_short_input( self : List[Any] ):
        '''simple docstring'''
        model_inputs =self.tokenizer(self.src_text , return_tensors='''tf''' )
        generated_ids =self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 | 1 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
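# Quick examples (known values): is_prime(29) -> True; is_prime(33) -> False (3 * 11).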
def solution(nth: int = 10001 ) -> int:
    count =0
    number =1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'vision-encoder-decoder'
lowerCamelCase_ = True
    def __init__( self : Optional[int] , **kwargs : Tuple ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'''A configuration of type {self.model_type} cannot be instantiated because '''
                F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        encoder_config =kwargs.pop('''encoder''' )
        encoder_model_type =encoder_config.pop('''model_type''' )
        decoder_config =kwargs.pop('''decoder''' )
        decoder_model_type =decoder_config.pop('''model_type''' )
        self.encoder =AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder =AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder =True
@classmethod
    def from_encoder_decoder_configs( cls : List[str] , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs : Optional[int] ):
        '''simple docstring'''
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder =True
        decoder_config.add_cross_attention =True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self : Union[str, Any] ):
        '''simple docstring'''
        output =copy.deepcopy(self.__dict__ )
        output['''encoder'''] =self.encoder.to_dict()
        output['''decoder'''] =self.decoder.to_dict()
        output['''model_type'''] =self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig ( lowercase__ ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self : Dict ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self : str ):
        '''simple docstring'''
        return 1E-4
    @property
    def outputs( self : List[str] ):
        '''simple docstring'''
        return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class VisionEncoderDecoderDecoderOnnxConfig ( lowercase__ ):
    @property
    def inputs( self : int ):
        '''simple docstring'''
        common_inputs =OrderedDict()
        common_inputs['''input_ids'''] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''attention_mask'''] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''encoder_hidden_states'''] ={0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
lowercase : Optional[Any] =OrderedDict()
lowercase : List[Any] =super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
lowercase , lowercase : Optional[int] =dummy_input['''input_ids'''].shape
lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase : List[str] =dummy_input.pop('''input_ids''' )
lowercase : Tuple =dummy_input.pop('''attention_mask''' )
lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ )
return common_inputs
class VisionEncoderDecoderOnnxConfig ( lowercase__ ):
    @property
    def inputs( self : int ):
        '''simple docstring'''
        pass
    def get_encoder_config( self : List[Any] , encoder_config : PretrainedConfig ):
        '''simple docstring'''
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self : Optional[int] , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , feature : str = "default" ):
        '''simple docstring'''
        decoder_config.encoder_hidden_size =encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float , capacitance: float ) -> tuple:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
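    # Worked example (illustrative values): a 10 mH inductor with a 1 uF
    # capacitor resonates at 1 / (2*pi*sqrt(1e-2 * 1e-6)) ~= 1591.5 Hz.
    print(resonant_frequency(inductance=10E-3 , capacitance=1E-6 ))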
| 88 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args() -> Any:
    parser =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
    args =parser.parse_args()
return args
def initialize_tpu(args: List[str] ) -> List[Any]:
    try:
        if args.tpu_name:
            tpu =tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
return tpu
def count_samples(file_list: Tuple ) -> Union[str, Any]:
    num_samples =0
    for file in file_list:
        filename =file.split('''/''' )[-1]
        sample_count =re.search(R'''-\d+-(\d+)\.tfrecord''' , filename ).group(1 )
        sample_count =int(sample_count )
        num_samples += sample_count
return num_samples
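# Example (hypothetical filename): a shard named "wikitext-000-5000.tfrecord"
# matches the regex above and contributes 5000 samples to the total.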
def prepare_dataset(records: List[str] , decode_fn: Union[str, Any] , mask_fn: int , batch_size: Optional[Any] , shuffle: Optional[int] , shuffle_buffer_size: Optional[int]=None ) -> str:
    num_samples =count_samples(records )
    dataset =tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset =dataset.shuffle(len(dataset ) )
    dataset =tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset =dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset =dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset =dataset.shuffle(args.shuffle_buffer_size )
    dataset =dataset.batch(batch_size , drop_remainder=True )
    dataset =dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset =dataset.prefetch(AUTO )
    return dataset
def main(args: Any ) -> str:
    if not args.no_tpu:
        tpu =initialize_tpu(args )
        strategy =tf.distribute.TPUStrategy(tpu )
    else:
        strategy =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
    tokenizer =AutoTokenizer.from_pretrained(args.tokenizer )
    config =AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size =tokenizer.vocab_size
    training_records =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
    eval_records =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
    num_train_samples =count_samples(training_records )
    steps_per_epoch =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps =steps_per_epoch * args.num_epochs
    with strategy.scope():
        model =TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule =create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
    def decode_fn(example: Optional[Any] ):
        features ={
            '''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
    data_collator =DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
    def mask_with_collator(batch: Dict ):
        # TF really needs an isin() function
        special_tokens_mask =(
            ~tf.cast(batch['''attention_mask'''] , tf.bool )
            | (batch['''input_ids'''] == tokenizer.cls_token_id)
            | (batch['''input_ids'''] == tokenizer.sep_token_id)
        )
        batch['''input_ids'''] , batch['''labels'''] =data_collator.tf_mask_tokens(
            batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size =args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset =prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset =prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks =[]
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 88 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = TransfoXLTokenizer
lowerCamelCase_ = False
lowerCamelCase_ = False
    def setUp( self : Union[str, Any] ):
        '''simple docstring'''
        super().setUp()
        vocab_tokens =[
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self : List[str] , **kwargs : Any ):
        '''simple docstring'''
        kwargs['''lower_case'''] =True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Union[str, Any] , tokenizer : Union[str, Any] ):
        '''simple docstring'''
        input_text ='''<unk> UNwanted , running'''
        output_text ='''<unk> unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self : Union[str, Any] ):
        '''simple docstring'''
        tokenizer =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens =tokenizer.tokenize('''<unk> UNwanted , running''' )
        self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower( self : int ):
        '''simple docstring'''
        tokenizer =TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
    def test_full_tokenizer_no_lower( self : Optional[Any] ):
        '''simple docstring'''
        tokenizer =TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_full_tokenizer_moses_numbers( self : Tuple ):
        '''simple docstring'''
        tokenizer =TransfoXLTokenizer(lower_case=False )
        text_in ='''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        tokens_out =[
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
    def test_move_added_token( self : List[Any] ):
        '''simple docstring'''
        tokenizer =self.get_tokenizer()
        original_len =len(tokenizer )
        tokenizer.add_tokens(['''new1''', '''new2'''] )
        tokenizer.move_added_token('''new1''' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 88 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str ) -> YolosConfig:
    config =YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size =192
        config.intermediate_size =768
        config.num_hidden_layers =12
        config.num_attention_heads =3
        config.image_size =[800, 1333]
        config.use_mid_position_embeddings =False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size =330
        config.num_hidden_layers =14
        config.num_attention_heads =6
        config.intermediate_size =1320
    elif "yolos_s" in yolos_name:
        config.hidden_size =384
        config.intermediate_size =1536
        config.num_hidden_layers =12
        config.num_attention_heads =6
    elif "yolos_b" in yolos_name:
        config.image_size =[800, 1344]
    config.num_labels =91
    repo_id ='''huggingface/label-files'''
    filename ='''coco-detection-id2label.json'''
    idalabel =json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel ={int(k ): v for k, v in idalabel.items()}
    config.id2label =idalabel
    config.label2id ={v: k for k, v in idalabel.items()}
    return config
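# Example (values from the branches above): get_yolos_config("yolos_ti") returns
# a config with hidden_size=192 and num_labels=91 (the COCO detection classes).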
def read_in_q_k_v(state_dict: dict , config: YolosConfig , base_model: bool = False ) -> Optional[Any]:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight =state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias =state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layer.{i}.attention.attention.query.weight'''] =in_proj_weight[: config.hidden_size, :]
        state_dict[f'''encoder.layer.{i}.attention.attention.query.bias'''] =in_proj_bias[: config.hidden_size]
        state_dict[f'''encoder.layer.{i}.attention.attention.key.weight'''] =in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''encoder.layer.{i}.attention.attention.key.bias'''] =in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''encoder.layer.{i}.attention.attention.value.weight'''] =in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''encoder.layer.{i}.attention.attention.value.bias'''] =in_proj_bias[-config.hidden_size :]
def rename_key(name: str ) -> str:
    if "backbone" in name:
        name =name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name =name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name =name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name =name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name =name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name =name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name =name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name =name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name =name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name =name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name =name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name =name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name =name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name =name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name
def convert_state_dict(orig_state_dict: dict , model: YolosForObjectDetection ) -> dict:
    for key in orig_state_dict.copy().keys():
        val =orig_state_dict.pop(key )
        if "qkv" in key:
            key_split =key.split('''.''' )
            layer_num =int(key_split[2] )
            dim =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] =val[:dim, :]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] =val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] =val[-dim:, :]
            else:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] =val[:dim]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] =val[dim : dim * 2]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] =val[-dim:]
        else:
            orig_state_dict[rename_key(key )] =val
    return orig_state_dict
def prepare_img() -> torch.Tensor:
    url ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool = False ) -> Union[str, Any]:
    config =get_yolos_config(yolos_name )
    # load original state_dict
    state_dict =torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # load 🤗 model
    model =YolosForObjectDetection(config )
    model.eval()
    new_state_dict =convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size =800 if yolos_name != '''yolos_ti''' else 512
    image_processor =YolosImageProcessor(format='''coco_detection''' , size=size )
    encoding =image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs =model(**encoding )
    logits , pred_boxes =outputs.logits, outputs.pred_boxes
    expected_slice_logits , expected_slice_boxes =None, None
if yolos_name == "yolos_ti":
        expected_slice_logits =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
        expected_slice_boxes =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
        expected_slice_boxes =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
        expected_slice_boxes =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
        expected_slice_logits =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
        expected_slice_boxes =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
        expected_slice_logits =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        expected_slice_boxes =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping ={
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
        model_name =model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''' )
        model.push_to_hub(model_name , organization='''hustvl''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 88 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
    def __init__( self : Dict , vocab_file : Optional[int] , bos_token : List[Any]="<s>" , eos_token : int="</s>" , sep_token : Optional[Any]="</s>" , cls_token : Optional[Any]="<s>" , unk_token : Any="<unk>" , pad_token : Any="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Optional[int] , ):
        '''simple docstring'''
        self.sp_model_kwargs ={} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words =7
        madeup_words =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs['''additional_special_tokens'''] =kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset =1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size =len(self.sp_model )
        madeup_words ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
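        # Illustration of the alignment table above: spm id 3 (the first "real"
        # piece, ",") becomes fairseq id 3 + self.fairseq_offset == 4, while ids
        # 0-3 stay reserved for <s>, <pad>, </s> and <unk>.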
    def __getstate__( self : int ):
        '''simple docstring'''
        state =self.__dict__.copy()
        state['''sp_model'''] =None
        state['''sp_model_proto'''] =self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Optional[Any] , d : Any ):
        '''simple docstring'''
        self.__dict__ =d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs ={}
        self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep =[self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep =[self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
    @property
    def vocab_size( self : Dict ):
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self : List[str] ):
        '''simple docstring'''
        vocab ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self : Tuple , text : str ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : Dict , token : Optional[Any] ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id =self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self : Optional[Any] , index : Any ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self : List[str] , tokens : Optional[Any] ):
        '''simple docstring'''
        out_string =''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file =os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model =self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
| 88 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = LDMTextToImagePipeline
lowerCamelCase_ = TEXT_TO_IMAGE_PARAMS - {
'negative_prompt',
'negative_prompt_embeds',
'cross_attention_kwargs',
'prompt_embeds',
}
lowerCamelCase_ = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'callback',
'callback_steps',
}
lowerCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
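# Tiny UNet / VAE / CLIP components so the pipeline test stays fast enough to run on CPU.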
torch.manual_seed(0 )
lowercase : List[Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase : Any =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowercase : Dict =AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : Tuple =CLIPTextModel(UpperCAmelCase__ )
lowercase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : str ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : Optional[Any] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : Tuple =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Tuple ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Dict =self.get_dummy_components()
lowercase : Optional[Any] =LDMTextToImagePipeline(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : str =pipe(**UpperCAmelCase__ ).images
lowercase : Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
lowercase : Tuple =np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=torch.floataa , UpperCAmelCase__ : str=0 ):
'''simple docstring'''
lowercase : str =torch.manual_seed(UpperCAmelCase__ )
lowercase : Union[str, Any] =np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) )
lowercase : str =torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
lowercase : Tuple ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : int =self.get_inputs(UpperCAmelCase__ )
lowercase : Tuple =pipe(**UpperCAmelCase__ ).images
lowercase : Tuple =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
lowercase : List[Any] =np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] )
lowercase : str =np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=torch.floataa , UpperCAmelCase__ : Any=0 ):
'''simple docstring'''
lowercase : Optional[Any] =torch.manual_seed(UpperCAmelCase__ )
lowercase : str =np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) )
lowercase : Union[str, Any] =torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
lowercase : Union[str, Any] ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Union[str, Any] =self.get_inputs(UpperCAmelCase__ )
lowercase : List[Any] =pipe(**UpperCAmelCase__ ).images[0]
lowercase : Tuple =load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
lowercase : str =np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 88 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _lowerCAmelCase ( __magic_name__ : str ) -> Union[str, Any]:
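# Walk every variable in the TF checkpoint and map it onto the PyTorch GPTSAN state dict, transposing kernels where the Mesh-TensorFlow and PyTorch layouts differ.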
lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase : List[str] =json.loads(open(__magic_name__ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
lowercase : Tuple =args.output + '''.pt'''
lowercase : int =OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir )
lowercase : int =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase : int =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase : Union[str, Any] =8
lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # feeds into nn.Sequential with Tanh, so 2 at a time
lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/moe''' ):
lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase : Union[str, Any] =key_name[-9:-7]
for i in range(16 ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase : Any =(
vnp[i].transpose([1, 0] ).copy()
) # Mesh-TensorFlow stores all experts in one array, so it is split per expert here
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/mlp''' ):
lowercase : Dict =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Any =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p1/bias''' ):
lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/kernel''' ):
lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/bias''' ):
lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/ln''' ):
lowercase : int =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/att''' ):
lowercase : int =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Dict =state[:, 0, :, :]
lowercase : Tuple =state[:, 1, :, :]
lowercase : List[Any] =state[:, 2, :, :]
lowercase : Optional[int] =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase : Dict =torch.tensor(__magic_name__ )
lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase : Tuple =torch.tensor(__magic_name__ )
elif key_name.endswith('''/o/kernel''' ):
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase : List[Any] =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : str =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/an''' ):
lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase : Optional[Any] ='''model.%s.weight''' % nlayer
lowercase : Optional[int] =vnp.copy() # same in embedded
lowercase : List[Any] =torch.tensor(__magic_name__ )
if key_name.startswith('''model/wte''' ):
lowercase : Tuple ='''lm_head.weight'''
lowercase : str =vnp.copy() # same in embedded
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/wob''' ):
lowercase : List[str] ='''final_logits_bias'''
lowercase : Dict =vnp.copy() # same in embedded
lowercase : Tuple =state.reshape((1, -1) )
lowercase : Dict =torch.tensor(__magic_name__ )
elif key_name == "model/dense/kernel":
lowercase : Dict ='''model.last_project.weight'''
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name == "model/dense_1/bias":
lowercase : List[Any] ='''model.last_project.bias'''
lowercase : str =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
torch.save(__magic_name__ , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 88 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[int] , UpperCAmelCase__ : WhisperForConditionalGeneration , UpperCAmelCase__ : WhisperProcessor , UpperCAmelCase__ : AutoencoderKL , UpperCAmelCase__ : CLIPTextModel , UpperCAmelCase__ : CLIPTokenizer , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase__ : StableDiffusionSafetyChecker , UpperCAmelCase__ : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=UpperCAmelCase__ , speech_processor=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase : Optional[Any] =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any]=16000 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
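# Step 1: transcribe the raw audio with Whisper; the transcription is then used as the text prompt for diffusion.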
lowercase : Optional[Any] =self.speech_processor.feature_extractor(
UpperCAmelCase__ , return_tensors='''pt''' , sampling_rate=UpperCAmelCase__ ).input_features.to(self.device )
lowercase : str =self.speech_model.generate(UpperCAmelCase__ , max_length=480000 )
lowercase : Union[str, Any] =self.speech_processor.tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , normalize=UpperCAmelCase__ )[
0
]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : int =1
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : List[str] =len(UpperCAmelCase__ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(UpperCAmelCase__ )}.''' )
# get prompt text embeddings
lowercase : Dict =self.tokenizer(
UpperCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
lowercase : Dict =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase : Any =text_input_ids[:, : self.tokenizer.model_max_length]
lowercase : Optional[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using an mps-friendly method
lowercase , lowercase , lowercase : str =text_embeddings.shape
lowercase : Dict =text_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
lowercase : Dict =text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase__ , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase : Union[str, Any] =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase : List[str]
if negative_prompt is None:
lowercase : str =[''''''] * batch_size
elif type(UpperCAmelCase__ ) is not type(UpperCAmelCase__ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase__ )} !='''
F''' {type(UpperCAmelCase__ )}.''' )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Union[str, Any] =[negative_prompt]
elif batch_size != len(UpperCAmelCase__ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase__ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
lowercase : str =negative_prompt
lowercase : Union[str, Any] =text_input_ids.shape[-1]
lowercase : Union[str, Any] =self.tokenizer(
UpperCAmelCase__ , padding='''max_length''' , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' , )
lowercase : Dict =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using an mps-friendly method
lowercase : Tuple =uncond_embeddings.shape[1]
lowercase : Union[str, Any] =uncond_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
lowercase : Any =uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase : Union[str, Any] =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated on the target device
# for 1-to-1 reproducibility with the CompVis implementation.
# However, this currently doesn't work on `mps`.
lowercase : List[str] =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase : str =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase : Dict =torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device='''cpu''' , dtype=UpperCAmelCase__ ).to(
self.device )
else:
lowercase : Optional[Any] =torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase : Dict =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
lowercase : Any =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase : List[str] =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase : Dict ='''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase : Tuple ={}
if accepts_eta:
lowercase : int =eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
lowercase : str =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : Optional[int] =self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
lowercase : Union[str, Any] =self.unet(UpperCAmelCase__ , UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase : Optional[Any] =noise_pred.chunk(2 )
lowercase : Optional[int] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase : List[Any] =self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =1 / 0.1_82_15 * latents
lowercase : List[str] =self.vae.decode(UpperCAmelCase__ ).sample
lowercase : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase : int =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase : str =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase__ , nsfw_content_detected=UpperCAmelCase__ )
| 88 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = BigBirdTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Any =self.get_rust_tokenizer()
lowercase : int ='''I was born in 92000, and this is falsé.'''
lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ )
lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_rust_tokenizer()
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''Hello World!'''
lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : Dict =''' '''.join(UpperCAmelCase__ )
lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' )
lowercase : Dict =BigBirdModel(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 88 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
lowerCamelCase_ = ['accelerate', 'launch']
lowerCamelCase_ = Path.home() / '.cache/huggingface/accelerate'
lowerCamelCase_ = 'default_config.yaml'
lowerCamelCase_ = config_folder / config_file
lowerCamelCase_ = config_folder / '_default_config.yaml'
lowerCamelCase_ = Path('tests/test_configs' )
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] ):
'''simple docstring'''
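# Stash any existing user config so the tests run against a clean default; it is restored afterwards.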
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase_ ( cls : Optional[int] ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=UpperCAmelCase__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(UpperCAmelCase__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = 'test-tpu'
lowerCamelCase_ = 'us-central1-a'
lowerCamelCase_ = 'ls'
lowerCamelCase_ = ['accelerate', 'tpu-config']
lowerCamelCase_ = 'cd /usr/share'
lowerCamelCase_ = 'tests/test_samples/test_command_file.sh'
lowerCamelCase_ = 'Running gcloud compute tpus tpu-vm ssh'
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : str =run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=UpperCAmelCase__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] =run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : str =run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Tuple =run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , UpperCAmelCase__ , )
| 88 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> int:
lowercase : Optional[Any] =[0 for i in range(r + 1 )]
# nc0 = 1
lowercase : Optional[Any] =1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowercase : str =min(__magic_name__ , __magic_name__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 88 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'gptsan-japanese'
lowerCamelCase_ = [
'past_key_values',
]
lowerCamelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , UpperCAmelCase__ : Optional[int]=36000 , UpperCAmelCase__ : int=1280 , UpperCAmelCase__ : int=1024 , UpperCAmelCase__ : int=8192 , UpperCAmelCase__ : Any=4096 , UpperCAmelCase__ : str=128 , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : int=128 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : int=1E-5 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Optional[Any]="float32" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Dict=0.0_02 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=35998 , UpperCAmelCase__ : Any=35995 , UpperCAmelCase__ : Any=35999 , **UpperCAmelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase : List[str] =vocab_size
lowercase : List[Any] =max_position_embeddings
lowercase : List[Any] =d_model
lowercase : Union[str, Any] =d_ff
lowercase : int =d_ext
lowercase : Optional[int] =d_spout
lowercase : int =num_switch_layers
lowercase : Union[str, Any] =num_ext_layers
lowercase : str =num_switch_layers + num_ext_layers
lowercase : Any =num_heads
lowercase : Union[str, Any] =num_experts
lowercase : Any =expert_capacity
lowercase : str =dropout_rate
lowercase : Optional[Any] =layer_norm_epsilon
lowercase : List[str] =router_bias
lowercase : int =router_jitter_noise
lowercase : Any =router_dtype
lowercase : Union[str, Any] =router_ignore_padding_tokens
lowercase : Any =output_hidden_states
lowercase : Optional[int] =output_attentions
lowercase : List[Any] =initializer_factor
lowercase : Any =output_router_logits
lowercase : Dict =use_cache
super().__init__(
separator_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 88 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> bool:
lowercase : Optional[int] =first_str.lower().strip()
lowercase : Union[str, Any] =second_str.lower().strip()
# Remove whitespace
lowercase : Optional[int] =first_str.replace(''' ''' , '''''' )
lowercase : Optional[Any] =second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__magic_name__ ) != len(__magic_name__ ):
return False
# Default values for count should be 0
lowercase : defaultdict[str, int] =defaultdict(__magic_name__ )
# For each position, increment the count for the character from first_str
# and decrement it for the character from second_str
for i in range(len(__magic_name__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 88 | 1 |
'''simple docstring'''
from math import factorial
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Any =real
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : int =[1] * rank
else:
lowercase : Optional[int] =rank
def __repr__( self : List[Any] ):
'''simple docstring'''
return (
F'''{self.real}+'''
F'''{"+".join(str(UpperCAmelCase__ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , UpperCAmelCase__ )
def __add__( self : Any , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return Dual(self.real + other , self.duals )
lowercase : Tuple =self.duals.copy()
lowercase : str =other.duals.copy()
if len(UpperCAmelCase__ ) > len(UpperCAmelCase__ ):
o_dual.extend([1] * (len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )) )
elif len(UpperCAmelCase__ ) < len(UpperCAmelCase__ ):
s_dual.extend([1] * (len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )) )
lowercase : Optional[Any] =[]
for i in range(len(UpperCAmelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , UpperCAmelCase__ )
lowerCamelCase_ = __add__
def __sub__( self : List[str] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
return self + other * -1
def __mul__( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : str =[]
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , UpperCAmelCase__ )
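# Dual-dual multiplication is a truncated polynomial product in epsilon: convolve the two coefficient lists.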
lowercase : Any =[0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , UpperCAmelCase__ )
lowerCamelCase_ = __mul__
def __truediv__( self : int , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Any =[]
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , UpperCAmelCase__ )
raise ValueError
def __floordiv__( self : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Dict =[]
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , UpperCAmelCase__ )
raise ValueError
def __pow__( self : Any , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
if n < 0 or isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError('''power must be a positive integer''' )
if n == 0:
return 1
if n == 1:
return self
lowercase : Optional[int] =self
for _ in range(n - 1 ):
x *= self
return x
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Any:
if not callable(__magic_name__ ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(__magic_name__ , (float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(__magic_name__ , __magic_name__ ):
raise ValueError('''differentiate() requires an int as input for order''' )
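# Seed x = position + 1*eps; after evaluating func, the k-th derivative is duals[k - 1] * k!.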
lowercase : Optional[int] =Dual(__magic_name__ , 1 )
lowercase : Tuple =func(__magic_name__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
return y**2 * y**4
print(differentiate(f, 9, 2))
| 88 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase : Optional[int] =None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# This test has to be overridden because BLOOM uses ALiBi positional embeddings, which impose no
# sequence length constraint. The parent class's test would fail since it relies on the maximum
# sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['image_processor', 'tokenizer']
lowerCamelCase_ = 'BlipImageProcessor'
lowerCamelCase_ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =False
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Union[str, Any] =self.image_processor
def __call__( self : List[str] , UpperCAmelCase__ : ImageInput = None , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
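# Three paths: text-only returns the tokenizer encoding, image-only returns pixel values, and image+text merges the tokenizer output into the image-processor encoding.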
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowercase : str =self.tokenizer
lowercase : Optional[Any] =self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
return text_encoding
# add pixel_values
lowercase : List[Any] =self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
if text is not None:
lowercase : Tuple =self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
else:
lowercase : Optional[Any] =None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase__ )
return encoding_image_processor
def lowerCamelCase_ ( self : Union[str, Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer.model_input_names
lowercase : Union[str, Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
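# A hedged usage sketch (the checkpoint name is an assumption; the class above
# matches transformers' BlipProcessor, wrapping image preprocessing and tokenization):
# from PIL import Image
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained('''Salesforce/blip-image-captioning-base''' )
# inputs = processor(images=Image.open('''photo.jpg''' ) , text='''a photo of''' , return_tensors='''pt''' )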
| 88 |
'''simple docstring'''
import math
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
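# Hedged usage sketch (assumption: the two helpers above were originally named
# ``real_power`` and ``reactive_power``): for 100 VA of apparent power at a
# power factor of 0.9,
# real_power(100 , 0.9)      # -> 90.0 W
# reactive_power(100 , 0.9)  # -> 100 * sqrt(1 - 0.81) ≈ 43.589 VAR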
| 88 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , FutureWarning , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
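# Migration sketch (the checkpoint name is an assumption): new code should use
# the image processor class directly instead of this deprecated wrapper.
# from transformers import CLIPImageProcessor
# image_processor = CLIPImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )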
| 88 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'switch_transformers'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : int , UpperCAmelCase__ : Optional[int]=32128 , UpperCAmelCase__ : str=768 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : Optional[Any]=2048 , UpperCAmelCase__ : List[str]=64 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Tuple=8 , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Union[str, Any]=0.01 , UpperCAmelCase__ : Optional[Any]="float32" , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : str=128 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=1E-6 , UpperCAmelCase__ : Optional[Any]=0.0_01 , UpperCAmelCase__ : List[str]=0.0_01 , UpperCAmelCase__ : Tuple=1.0 , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Optional[Any]=1 , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
lowercase : Union[str, Any] =vocab_size
lowercase : Optional[Any] =d_model
lowercase : int =d_kv
lowercase : Optional[int] =d_ff
lowercase : Union[str, Any] =num_sparse_encoder_layers
lowercase : List[Any] =num_layers
lowercase : List[Any] =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase : Optional[int] =num_sparse_decoder_layers
# This tells us how often (i.e. every how many encoder layers) a sparse layer is placed.
if self.num_sparse_encoder_layers > 0:
lowercase : int =self.num_layers // self.num_sparse_encoder_layers
else:
lowercase : List[Any] =self.num_layers # HACK: this will create 0 sparse layers
# This tells us how often (i.e. every how many decoder layers) a sparse layer is placed.
if self.num_sparse_decoder_layers > 0:
lowercase : Any =self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowercase : Union[str, Any] =self.num_decoder_layers # HACK: this will create 0 sparse layers
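# Worked example: with num_layers=12 and num_sparse_encoder_layers=3, a sparse
# layer is placed every 12 // 3 = 4 layers; with 0 sparse layers the step equals
# num_layers, so no layer index ever becomes sparse.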
lowercase : Dict =num_heads
lowercase : Union[str, Any] =num_experts
lowercase : List[Any] =expert_capacity
lowercase : List[str] =router_bias
lowercase : Tuple =router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowercase : str =router_dtype
lowercase : List[str] =router_ignore_padding_tokens
lowercase : List[str] =relative_attention_num_buckets
lowercase : Optional[Any] =relative_attention_max_distance
lowercase : Optional[int] =dropout_rate
lowercase : Any =layer_norm_epsilon
lowercase : Union[str, Any] =initializer_factor
lowercase : Dict =feed_forward_proj
lowercase : Union[str, Any] =use_cache
lowercase : Union[str, Any] =add_router_probs
lowercase : str =router_z_loss_coef
lowercase : Optional[int] =router_aux_loss_coef
lowercase : Union[str, Any] =self.feed_forward_proj.split('''-''' )
lowercase : Optional[int] =act_info[-1]
lowercase : Tuple =act_info[0] == '''gated'''
if len(UpperCAmelCase__ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase__ ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowercase : Dict ='''gelu_new'''
super().__init__(
pad_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 88 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
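# Hedged invocation sketch (the script filename is hypothetical; the flags come
# from the argparse definition above):
# python extract_distillation_checkpoint.py --model_type roberta --model_name roberta-large \
#     --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform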
| 88 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
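# Freeze helper: the loop below disables gradient tracking for every parameter
# of the given module (in unobfuscated form the loop body is
# ``param.requires_grad = False``).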
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(False )
fig.axes.get_yaxis().set_visible(False )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
UpperCamelCase_ = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
UpperCamelCase_ = """▁"""
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : Union[str, Any]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : str="<mask>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ):
'''simple docstring'''
# Mask token behaves like a normal word, i.e. includes the space before it
lowercase : str =AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase : Optional[Any] =vocab_file
lowercase : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : Dict ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : Dict =len(self.sp_model ) - 1
lowercase : Dict ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Tuple =[self.cls_token_id]
lowercase : str =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ )) + [1]
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[str] ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[Any] =self.sp_model.PieceToId(UpperCAmelCase__ )
return spm_id if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Union[str, Any] =[]
lowercase : Any =''''''
lowercase : Union[str, Any] =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
lowercase : Union[str, Any] =True
lowercase : List[str] =[]
else:
current_sub_tokens.append(UpperCAmelCase__ )
lowercase : List[str] =False
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def __getstate__( self : List[str] ):
'''simple docstring'''
lowercase : List[str] =self.__dict__.copy()
lowercase : Union[str, Any] =None
return state
def __setstate__( self : List[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Optional[int] =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Optional[int] ={}
lowercase : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : int =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Dict =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
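# Hedged usage sketch (the class above mirrors transformers' BarthezTokenizer;
# the checkpoint name comes from the vocab map at the top of this file):
# tokenizer = BarthezTokenizer.from_pretrained('''moussaKam/barthez''' )
# ids = tokenizer('''Paris est la capitale de la France.''' )['''input_ids''']
# tokenizer.decode(ids )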
| 88 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Tuple =HfArgumentParser(TensorFlowBenchmarkArguments )
lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0]
lowercase : Any =TensorFlowBenchmark(args=__magic_name__ )
try:
lowercase : List[Any] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any =''' '''.join(str(e ).split(''' ''' )[:-1] )
lowercase : Optional[Any] =''''''
lowercase : List[str] =eval(str(e ).split(''' ''' )[-1] )
lowercase : Optional[Any] =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__magic_name__ )
if len(wrong_args ) > 0:
lowercase : int =full_error_msg + begin_error_msg + str(wrong_args )
raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
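# Hedged invocation sketch (the script filename is hypothetical; --models,
# --batch_sizes and --sequence_lengths are standard benchmark arguments):
# python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128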
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int ) -> list:
lowercase : Optional[int] =int(__magic_name__ )
if n_element < 1:
lowercase : Dict =ValueError('''n_element should be a positive number''' )
raise my_error
lowercase : str =[1]
lowercase , lowercase , lowercase : List[Any] =(0, 0, 0)
lowercase : Any =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
UpperCamelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
UpperCamelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("""-----------------------------------------------------""")
| 88 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool:
lowercase : str =len(__magic_name__ )
# We need to create solution object to save path.
lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ )
if solved:
print('''\n'''.join(str(row ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool:
lowercase : Optional[int] =len(__magic_name__ )
# Final check point.
if i == j == (size - 1):
lowercase : Optional[int] =1
return True
lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds
lowercase : Tuple =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowercase : Union[str, Any] =1
# check for directions
if (
run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ )
or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ )
):
return True
lowercase : str =0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
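# Hedged usage sketch (assumption: ``solve_maze`` was the original name of the
# first function above): 0 marks an open cell and 1 a blocked one, so this
# 2x2 maze is solved by moving down and then right.
# solve_maze([[0, 1], [0, 0]])  # prints [1, 0] / [1, 1] and returns True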
| 88 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'mctct'
def __init__( self : Any , UpperCAmelCase__ : str=8065 , UpperCAmelCase__ : str=1536 , UpperCAmelCase__ : int=36 , UpperCAmelCase__ : Union[str, Any]=6144 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : str=384 , UpperCAmelCase__ : List[str]=920 , UpperCAmelCase__ : Dict=1E-5 , UpperCAmelCase__ : Any=0.3 , UpperCAmelCase__ : int="relu" , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Optional[Any]=0.3 , UpperCAmelCase__ : Dict=0.3 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : Optional[Any]=0.3 , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : Dict=(7,) , UpperCAmelCase__ : Any=(3,) , UpperCAmelCase__ : Union[str, Any]=80 , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]="sum" , UpperCAmelCase__ : Optional[Any]=False , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
lowercase : str =vocab_size
lowercase : int =hidden_size
lowercase : Union[str, Any] =num_hidden_layers
lowercase : Optional[Any] =intermediate_size
lowercase : Tuple =num_attention_heads
lowercase : Tuple =attention_head_dim
lowercase : str =max_position_embeddings
lowercase : str =layer_norm_eps
lowercase : int =layerdrop
lowercase : Dict =hidden_act
lowercase : List[str] =initializer_range
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Dict =pad_token_id
lowercase : Union[str, Any] =bos_token_id
lowercase : Optional[int] =eos_token_id
lowercase : Optional[Any] =conv_glu_dim
lowercase : Any =conv_dropout
lowercase : int =num_conv_layers
lowercase : Optional[Any] =input_feat_per_channel
lowercase : Optional[int] =input_channels
lowercase : List[str] =conv_channels
lowercase : Optional[Any] =ctc_loss_reduction
lowercase : Optional[Any] =ctc_zero_infinity
# prevents the config test from failing when exporting to json
lowercase : Optional[int] =list(UpperCAmelCase__ )
lowercase : Optional[Any] =list(UpperCAmelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
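# Validation example: the defaults conv_kernel=(7,) with num_conv_layers=1 pass
# this check, while e.g. conv_kernel=(7, 3) with num_conv_layers=1 would raise.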
| 88 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Any =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
# Sample Gaussian noise to begin the loop
if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ):
lowercase : Optional[int] =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
lowercase : Dict =self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
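# Hedged usage sketch (this pipeline matches diffusers' DDIMPipeline; the
# checkpoint choice is an assumption):
# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained('''google/ddpm-cifar10-32''' )
# image = pipe(batch_size=1 , num_inference_steps=50 , eta=0.0 ).images[0]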
| 88 | 1 |
'''simple docstring'''
from math import factorial
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> int:
# If either of these conditions is true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(__magic_name__ ) // (factorial(__magic_name__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
| 88 |
'''simple docstring'''
import argparse
import copy
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
lowercase : int ={}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase : List[str] =[]
_list.append([line.split()[1], line.split()[2]] )
lowercase : Tuple =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
lowercase : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str:
with open(__magic_name__ ) as f:
lowercase : Optional[int] =f.read(1 )
lowercase : List[Any] =start_node
lowercase : List[Any] =[]
lowercase : str =start_node
lowercase : str =0
while visiting not in first_solution:
lowercase : Optional[int] =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
lowercase : List[Any] =k[1]
lowercase : str =k[0]
first_solution.append(__magic_name__ )
lowercase : Any =distance_of_first_solution + int(__magic_name__ )
lowercase : Optional[int] =best_node
first_solution.append(__magic_name__ )
lowercase : str =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase : str =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple:
lowercase : Tuple =[]
for n in solution[1:-1]:
lowercase : Dict =solution.index(n )
for kn in solution[1:-1]:
lowercase : Tuple =solution.index(kn )
if n == kn:
continue
lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ )
lowercase : Optional[int] =kn
lowercase : List[Any] =n
lowercase : List[Any] =0
for k in _tmp[:-1]:
lowercase : Optional[int] =_tmp[_tmp.index(k ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase : Optional[int] =distance + int(i[1] )
_tmp.append(distance )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]:
lowercase : str =1
lowercase : List[Any] =first_solution
lowercase : Any =[]
lowercase : str =distance_of_first_solution
lowercase : str =solution
while count <= iters:
lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ )
lowercase : Dict =0
lowercase : int =neighborhood[index_of_best_solution]
lowercase : Optional[int] =len(__magic_name__ ) - 1
lowercase : List[Any] =False
while not found:
lowercase : List[Any] =0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
lowercase : List[str] =best_solution[i]
lowercase : Dict =solution[i]
break
lowercase : Any =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase : str =True
lowercase : int =best_solution[:-1]
lowercase : Any =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase : Optional[int] =cost
lowercase : str =solution
else:
lowercase : Optional[int] =index_of_best_solution + 1
lowercase : List[Any] =neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
lowercase : Optional[int] =count + 1
return best_solution_ever, best_cost
def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple:
lowercase : List[str] =generate_neighbours(args.File )
lowercase , lowercase : Optional[Any] =generate_first_solution(
args.File , __magic_name__ )
lowercase , lowercase : int =tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
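# Hedged invocation sketch (the script and data file names are hypothetical;
# the flags come from the argparse definition above):
# python tabu_search.py -f tsp_edges.txt -i 100 -s 5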
| 88 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=UpperCAmelCase__ , )
assert hasattr(self , '''env''' )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : str =F'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
lowercase : str ={'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCAmelCase__ , instance_count=UpperCAmelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCAmelCase__ , py_version='''py36''' , )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int ):
'''simple docstring'''
# create estimator
lowercase : Dict =self.create_estimator(UpperCAmelCase__ )
# run training
estimator.fit()
# result dataframe
lowercase : Any =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase : Tuple =list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowercase : Optional[int] =list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase : Dict =(
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCAmelCase__ )
| 88 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 1000000 ) -> int:
lowercase : Dict =set(range(3 , __magic_name__ , 2 ) )
primes.add(2 )
for p in range(3 , __magic_name__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , __magic_name__ , p ) ) )
lowercase : List[Any] =[float(n ) for n in range(limit + 1 )]
for p in primes:
for n in range(p , limit + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
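# Small sanity check: for a limit of 8 the totient sum phi(2) + ... + phi(8) is
# 21, matching the count of reduced proper fractions with d <= 8 (the worked
# example of Project Euler 72); ``solution`` follows the call-site name above.
print(f'''{solution(8) = }''')  # -> 21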
| 88 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[Any] =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , '''depth_multiplier''' ) )
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Union[str, Any]=0.25 , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[int]="relu6" , UpperCAmelCase__ : List[Any]=1280 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : Tuple=None , ):
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : int =batch_size
lowercase : Any =num_channels
lowercase : Optional[Any] =image_size
lowercase : Optional[Any] =depth_multiplier
lowercase : int =depth_divisible_by
lowercase : str =min_depth
lowercase : List[str] =expand_ratio
lowercase : Dict =tf_padding
lowercase : Any =output_stride
lowercase : Any =first_layer_is_expansion
lowercase : Union[str, Any] =finegrained_output
lowercase : Union[str, Any] =hidden_act
lowercase : Union[str, Any] =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase : Optional[Any] =classifier_dropout_prob
lowercase : Optional[int] =use_labels
lowercase : List[str] =is_training
lowercase : Tuple =num_labels
lowercase : List[Any] =initializer_range
lowercase : Optional[int] =scope
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : int =None
lowercase : Any =None
if self.use_labels:
lowercase : str =ids_tensor([self.batch_size] , self.num_labels )
lowercase : Optional[int] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase : Any =self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Dict =MobileNetVaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =self.num_labels
lowercase : str =MobileNetVaForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Optional[int] =self.num_labels
lowercase : Any =MobileNetVaForSemanticSegmentation(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Any =model(UpperCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase : Optional[Any] =model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : str =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : str ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =MobileNetVaModelTester(self )
lowercase : Any =MobileNetVaConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : int =model_class(UpperCAmelCase__ )
lowercase : Any =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : str =[*signature.parameters.keys()]
lowercase : str =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ):
lowercase : Tuple =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : str =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Union[str, Any] =outputs.hidden_states
lowercase : int =16
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowercase : int =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[str] =MobileNetVaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def _lowerCAmelCase ( ) -> Tuple:
lowercase : str =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Optional[Any] =MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(UpperCAmelCase__ )
lowercase : Optional[int] =self.default_image_processor
lowercase : Optional[int] =prepare_img()
lowercase : List[str] =image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase : Dict =model(**UpperCAmelCase__ )
# verify the logits
lowercase : str =torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowercase : Any =torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[int] =MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase : List[str] =model.to(UpperCAmelCase__ )
lowercase : List[Any] =MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase : List[Any] =prepare_img()
lowercase : Any =image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase : Optional[Any] =model(**UpperCAmelCase__ )
lowercase : Any =outputs.logits
# verify the logits
lowercase : Dict =torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , UpperCAmelCase__ )
lowercase : List[str] =torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=UpperCAmelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
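# Hedged usage sketch (the checkpoint names appear in the tests above; running
# them through a `pipeline` is an assumption):
# from transformers import pipeline
# classifier = pipeline('''image-classification''' , model='''google/mobilenet_v2_1.0_224''' )
# classifier('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )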
| 88 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def _lowerCAmelCase ( __magic_name__ : int = 2000000 ) -> int:
lowercase : list[int] =[0]
lowercase : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
lowercase : int =0
# the area corresponding to the grid that gives the product closest to target
lowercase : int =0
# an estimate of b, using the quadratic formula
lowercase : float
# the largest integer less than b_estimate
lowercase : int
# the smallest integer greater than b_estimate
lowercase : int
# the triangle number corresponding to b_floor
lowercase : int
# the triangle number corresponding to b_ceil
lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
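# For a fixed triangle_a = T(a), b_estimate solves T(b) = target / triangle_a
# with T(x) = x * (x + 1) / 2: the quadratic b^2 + b - 2 * target / triangle_a = 0
# gives b = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2, as computed below.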
lowercase : str =(-1 + sqrt(1 + 8 * target / triangle_a )) / 2
lowercase : str =floor(__magic_name__ )
lowercase : Optional[int] =ceil(__magic_name__ )
lowercase : int =triangle_numbers[b_floor]
lowercase : int =triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
lowercase : List[str] =triangle_b_first_guess * triangle_a
lowercase : int =idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
lowercase : int =triangle_b_second_guess * triangle_a
lowercase : int =idx_a * b_ceil
return area
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
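# Decoder variant: flip the config to decoder mode and fabricate encoder
# hidden states plus a cross-attention mask.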
lowercase : List[str] =True
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ )
lowercase : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : int ) -> float:
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(__magic_name__ , __magic_name__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
lowercase : Optional[int] =rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase : List[str] =years_to_repay * 12
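# Standard amortised-loan (EMI) formula with monthly rate r and n payments:
# EMI = P * r * (1 + r)^n / ((1 + r)^n - 1)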
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
# Iterative radix-2 FFT: each pass halves the number of columns
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
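# By the convolution theorem, multiplying the two DFTs element-wise is
# equivalent to multiplying the polynomials in coefficient space.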
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
'''simple docstring'''
lowercase : Any ='''A = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowercase : Tuple ='''B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ = 256
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['melgan']
def __init__( self : Tuple , UpperCAmelCase__ : SpectrogramNotesEncoder , UpperCAmelCase__ : SpectrogramContEncoder , UpperCAmelCase__ : TaFilmDecoder , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : OnnxRuntimeModel if is_onnx_available() else Any , ):
'''simple docstring'''
super().__init__()
# From MELGAN
lowercase : List[str] =math.log(1E-5 ) # Matches MelGAN training.
lowercase : Any =4.0 # Largest value for most examples
lowercase : str =128
self.register_modules(
notes_encoder=UpperCAmelCase__ , continuous_encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , melgan=UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=(-1.0, 1.0) , UpperCAmelCase__ : List[Any]=False ):
'''simple docstring'''
lowercase , lowercase : Optional[int] =output_range
if clip:
lowercase : Dict =torch.clip(UpperCAmelCase__ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase : List[str] =(features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=(-1.0, 1.0) , UpperCAmelCase__ : Optional[int]=False ):
'''simple docstring'''
lowercase , lowercase : Optional[int] =input_range
lowercase : List[Any] =torch.clip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if clip else outputs
# Scale to [0, 1].
lowercase : Union[str, Any] =(outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Optional[int] =input_tokens > 0
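# Non-padding positions (token id > 0) form the attention mask for the notes encoder.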
lowercase , lowercase : Optional[Any] =self.notes_encoder(
encoder_input_tokens=UpperCAmelCase__ , encoder_inputs_mask=UpperCAmelCase__ )
lowercase , lowercase : Union[str, Any] =self.continuous_encoder(
encoder_inputs=UpperCAmelCase__ , encoder_inputs_mask=UpperCAmelCase__ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =noise_time
if not torch.is_tensor(UpperCAmelCase__ ):
lowercase : Dict =torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(UpperCAmelCase__ ) and len(timesteps.shape ) == 0:
lowercase : Any =timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase : Optional[int] =timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase : Optional[Any] =self.decoder(
encodings_and_masks=UpperCAmelCase__ , decoder_input_tokens=UpperCAmelCase__ , decoder_noise_time=UpperCAmelCase__ )
return logits
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase__ : List[List[int]] , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "numpy" , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(UpperCAmelCase__ )}.''' )
lowercase : List[Any] =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase : Optional[Any] =np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase : Any =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase__ , device=self.device )
for i, encoder_input_tokens in enumerate(UpperCAmelCase__ ):
if i == 0:
lowercase : Optional[int] =torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase : List[Any] =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase__ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase : int =ones
lowercase : List[str] =self.scale_features(
UpperCAmelCase__ , output_range=[-1.0, 1.0] , clip=UpperCAmelCase__ )
lowercase : Dict =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCAmelCase__ , continuous_mask=UpperCAmelCase__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase : str =randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCAmelCase__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase : str =self.decode(
encodings_and_masks=UpperCAmelCase__ , input_tokens=UpperCAmelCase__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase : List[str] =self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Union[str, Any] =self.scale_to_features(UpperCAmelCase__ , input_range=[-1.0, 1.0] )
lowercase : str =mel[:1]
lowercase : List[Any] =mel.cpu().float().numpy()
lowercase : Any =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase__ , UpperCAmelCase__ )
logger.info('''Generated segment''' , UpperCAmelCase__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'numpy\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'numpy\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
lowercase : Dict =self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase : Tuple =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCAmelCase__ )
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] ) -> str:
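# Computes C(n, r) via Pascal's rule in O(n * r) time and O(r) space; the
# inner loop runs right-to-left so c[j - 1] still holds the previous row's
# value when it is added into c[j].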
lowercase : Optional[Any] =[0 for i in range(r + 1 )]
# nc0 = 1
lowercase : Optional[Any] =1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowercase : str =min(__magic_name__ , __magic_name__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 88 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'vision-encoder-decoder'
lowerCamelCase_ = True
def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuration of type {self.model_type} cannot be instantiated: both `encoder` and '''
F'''`decoder` sub-configurations must be passed, but only {kwargs} was given.''' )
lowercase : Optional[Any] =kwargs.pop('''encoder''' )
lowercase : List[Any] =encoder_config.pop('''model_type''' )
lowercase : List[str] =kwargs.pop('''decoder''' )
lowercase : Dict =decoder_config.pop('''model_type''' )
lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : str =True
@classmethod
def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowercase : int =True
lowercase : Optional[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =copy.deepcopy(self.__dict__ )
lowercase : Union[str, Any] =self.encoder.to_dict()
lowercase : Union[str, Any] =self.decoder.to_dict()
lowercase : int =self.__class__.model_type
return output
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
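# Presumably the absolute tolerance (atol) used when validating the ONNX
# export against the reference PyTorch outputs.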
return 1E-4
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =OrderedDict()
lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
lowercase : Optional[Any] =OrderedDict()
lowercase : List[Any] =super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
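# Reuse the text OnnxConfig's dummy inputs, then replace `input_ids` with
# zero-valued encoder hidden states of the matching shape.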
lowercase , lowercase : Optional[int] =dummy_input['''input_ids'''].shape
lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase : List[str] =dummy_input.pop('''input_ids''' )
lowercase : Tuple =dummy_input.pop('''attention_mask''' )
lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ )
return common_inputs
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ):
'''simple docstring'''
lowercase : List[Any] =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Any:
lowercase : Any =ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
lowercase : List[Any] =parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(__magic_name__ )
# Let's go
lowercase : Union[str, Any] =parser.parse_args()
if not hasattr(__magic_name__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
lowercase : Optional[Any] =args.func(__magic_name__ )
service.run()
if __name__ == "__main__":
main()
| 88 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> Any:
lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
lowercase : Union[str, Any] =parser.parse_args()
return args
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]:
try:
if args.tpu_name:
lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(__magic_name__ )
tf.tpu.experimental.initialize_tpu_system(__magic_name__ )
return tpu
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]:
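# Shard filenames are assumed to end in "-<shard>-<num_samples>.tfrecord",
# so each shard's sample count can be recovered from its name alone.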
lowercase : str =0
for file in file_list:
lowercase : List[str] =file.split('''/''' )[-1]
lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 )
lowercase : int =int(__magic_name__ )
num_samples += sample_count
return num_samples
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str:
lowercase : int =count_samples(__magic_name__ )
lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ )
if shuffle:
lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) )
lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) )
lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase : int =dataset.shuffle(args.shuffle_buffer_size )
lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ )
lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ )
return dataset
def _lowerCAmelCase ( __magic_name__ : Any ) -> str:
if not args.no_tpu:
lowercase : Optional[Any] =initialize_tpu(__magic_name__ )
lowercase : Any =tf.distribute.TPUStrategy(__magic_name__ )
else:
lowercase : Optional[Any] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
lowercase : Any =AutoTokenizer.from_pretrained(args.tokenizer )
lowercase : Union[str, Any] =AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase : Optional[Any] =tokenizer.vocab_size
lowercase : str =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
lowercase : Optional[int] =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
lowercase : Any =count_samples(__magic_name__ )
lowercase : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase : Union[str, Any] =steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase : List[Any] =TFAutoModelForMaskedLM.from_config(__magic_name__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase : Dict =create_optimizer(
num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__magic_name__ , metrics=['''accuracy'''] )
def decode_fn(__magic_name__ : Optional[Any] ):
lowercase : Union[str, Any] ={
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__magic_name__ , __magic_name__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase : str =DataCollatorForLanguageModeling(
tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='''tf''' )
def mask_with_collator(__magic_name__ : Dict ):
# TF really needs an isin() function
lowercase : int =(
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
lowercase , lowercase : Union[str, Any] =data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , )
return batch
lowercase : List[str] =args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase : Dict =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase : Union[str, Any] =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , )
lowercase : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) )
model.fit(
__magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
main(args)
| 88 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase : Optional[int] =None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
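# Round-trip check on real data: encoding and then decoding XNLI premises
# must reproduce the original strings exactly.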
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# This test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
# impose any sequence length constraint. The parent class's version would fail since it relies on
# the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def _lowerCAmelCase ( __magic_name__ : Callable[[int | float], int | float] , __magic_name__ : int | float , __magic_name__ : int | float , __magic_name__ : int = 100 , ) -> float:
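# Composite trapezoidal rule: the area is approximated as the sum of
# (f(x_i) + f(x_{i+1})) / 2 * (x_{i+1} - x_i) over `steps` equal segments.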
lowercase : List[Any] =x_start
lowercase : str =fnc(__magic_name__ )
lowercase : Optional[Any] =0.0
for _ in range(__magic_name__ ):
# Approximate each small segment of the curve as linear and compute
# the trapezoidal area of that segment
lowercase : Tuple =(x_end - x_start) / steps + xa
lowercase : Tuple =fnc(__magic_name__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
lowercase : List[str] =xa
lowercase : Union[str, Any] =fxa
return area
if __name__ == "__main__":
def _lowerCAmelCase ( __magic_name__ : Any ) -> int:
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
UpperCamelCase_ = 10
while i <= 100000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 88 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[Any] =7
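# fairseq appends 7 "<madeupword>" placeholder tokens, presumably to pad the
# vocabulary size to a multiple of 8.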
lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Union[str, Any] =1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : str =len(self.sp_model )
lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase__ )
lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
lowercase : Optional[int] =self.__dict__.copy()
lowercase : List[Any] =None
lowercase : Tuple =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Optional[int] ={}
lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : List[Any] =[self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : int =[self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 200 ) -> int:
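# Classic coin-change counting DP (Project Euler 31): processing one coin
# denomination at a time counts combinations rather than ordered sequences.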
lowercase : Any =[1, 2, 5, 10, 20, 50, 100, 200]
lowercase : int =[0] * (pence + 1)
lowercase : str =1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__magic_name__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 88 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _lowerCAmelCase ( __magic_name__ : str ) -> Union[str, Any]:
lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase : List[str] =json.loads(open(__magic_name__ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
lowercase : Tuple =args.output + '''.pt'''
lowercase : int =OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir )
lowercase : int =reader.get_variable_to_shape_map()
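# Walk every variable in the TF checkpoint, map its name to the matching
# PyTorch state-dict key, and transpose 2-D kernels where needed: TF stores
# dense kernels as (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features).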
for key_name in shapes.keys():
lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase : int =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase : Union[str, Any] =8
lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # fed into an nn.Sequential with Tanh, so two layers at a time
lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/moe''' ):
lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase : Union[str, Any] =key_name[-9:-7]
for i in range(16 ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase : Any =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/mlp''' ):
lowercase : Dict =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Any =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p1/bias''' ):
lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/kernel''' ):
lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/bias''' ):
lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/ln''' ):
lowercase : int =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/att''' ):
lowercase : int =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Dict =state[:, 0, :, :]
lowercase : Tuple =state[:, 1, :, :]
lowercase : List[Any] =state[:, 2, :, :]
lowercase : Optional[int] =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase : Dict =torch.tensor(__magic_name__ )
lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase : Tuple =torch.tensor(__magic_name__ )
elif key_name.endswith('''/o/kernel''' ):
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase : List[Any] =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : str =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/an''' ):
lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase : Optional[Any] ='''model.%s.weight''' % nlayer
lowercase : Optional[int] =vnp.copy() # same in embedded
lowercase : List[Any] =torch.tensor(__magic_name__ )
if key_name.startswith('''model/wte''' ):
lowercase : Tuple ='''lm_head.weight'''
lowercase : str =vnp.copy() # same in embedded
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/wob''' ):
lowercase : List[str] ='''final_logits_bias'''
lowercase : Dict =vnp.copy() # same in embedded
lowercase : Tuple =state.reshape((1, -1) )
lowercase : Dict =torch.tensor(__magic_name__ )
elif key_name == "model/dense/kernel":
lowercase : Dict ='''model.last_project.weight'''
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name == "model/dense_1/bias":
lowercase : List[Any] ='''model.last_project.bias'''
lowercase : str =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
torch.save(__magic_name__ , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 88 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'xlm-roberta-xl'
def __init__( self : Dict , UpperCAmelCase__ : Union[str, Any]=250880 , UpperCAmelCase__ : Optional[Any]=2560 , UpperCAmelCase__ : Optional[int]=36 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Tuple=10240 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Dict=514 , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=1E-05 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Tuple="absolute" , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : str , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Union[str, Any] =vocab_size
lowercase : Dict =hidden_size
lowercase : List[Any] =num_hidden_layers
lowercase : str =num_attention_heads
lowercase : Optional[int] =hidden_act
lowercase : Any =intermediate_size
lowercase : Union[str, Any] =hidden_dropout_prob
lowercase : str =attention_probs_dropout_prob
lowercase : Union[str, Any] =max_position_embeddings
lowercase : Dict =type_vocab_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[Any] =layer_norm_eps
lowercase : Dict =position_embedding_type
lowercase : int =use_cache
lowercase : Optional[int] =classifier_dropout
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase : Any ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Tuple ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
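# Minimal usage sketch (assumes the class above is exposed as
# transformers.XLMRobertaXLConfig; the argument values mirror the defaults above):
# from transformers import XLMRobertaXLConfig
# config = XLMRobertaXLConfig(vocab_size=250880, hidden_size=2560, num_hidden_layers=36)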
| 88 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = BigBirdTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Any =self.get_rust_tokenizer()
lowercase : int ='''I was born in 92000, and this is falsé.'''
lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ )
lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_rust_tokenizer()
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''Hello World!'''
lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : Dict =''' '''.join(UpperCAmelCase__ )
lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' )
lowercase : Dict =BigBirdModel(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 88 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionSAGPipeline
lowerCamelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Tuple =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase : Union[str, Any] =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowercase : Dict =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : str =CLIPTextModel(UpperCAmelCase__ )
lowercase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : int ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str]=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : List[Any] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : Optional[Any] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Any ={
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : int =StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowercase : Tuple =sag_pipe.to(UpperCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : str ='''.'''
lowercase : int =torch.manual_seed(0 )
lowercase : Optional[int] =sag_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowercase : str =output.images
lowercase : Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[int] =np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Any =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase : Tuple =sag_pipe.to(UpperCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[int] ='''.'''
lowercase : List[Any] =torch.manual_seed(0 )
lowercase : Optional[int] =sag_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowercase : List[Any] =output.images
lowercase : Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Any =np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[str] =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase : List[str] =sag_pipe.to(UpperCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[str] ='''.'''
lowercase : List[str] =torch.manual_seed(0 )
lowercase : List[Any] =sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
lowercase : Any =output.images
assert image.shape == (1, 512, 768, 3)
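# Minimal out-of-test usage sketch for the SAG pipeline. The model id comes from
# the tests above; the prompt and the sag_scale value are my own illustrative choices.
# pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
# image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]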
| 88 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] ) -> str:
lowercase : Optional[Any] =[0 for i in range(r + 1 )]
# nc0 = 1
lowercase : Optional[Any] =1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowercase : str =min(__magic_name__ , __magic_name__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
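# De-obfuscated sketch of the same Pascal-row DP (the function name is mine).
# The row is updated in place from right to left, so each entry is still built
# from the previous row's values and only O(r) memory is needed.
def binomial_coefficient_sketch(n: int, r: int) -> int:
    row = [0] * (r + 1)
    row[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        for j in range(min(i, r), 0, -1):  # right to left keeps row i-1 intact
            row[j] += row[j - 1]
    return row[r]

assert binomial_coefficient_sketch(10, 5) == 252  # matches the print above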
| 88 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'deberta-v2'
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=128100 , UpperCAmelCase__ : str=1536 , UpperCAmelCase__ : Any=24 , UpperCAmelCase__ : Optional[int]=24 , UpperCAmelCase__ : Union[str, Any]=6144 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1E-7 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Union[str, Any]=-1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : List[str]="gelu" , **UpperCAmelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Dict =hidden_size
lowercase : List[Any] =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : Union[str, Any] =intermediate_size
lowercase : int =hidden_act
lowercase : List[Any] =hidden_dropout_prob
lowercase : str =attention_probs_dropout_prob
lowercase : Optional[int] =max_position_embeddings
lowercase : str =type_vocab_size
lowercase : Dict =initializer_range
lowercase : str =relative_attention
lowercase : List[Any] =max_relative_positions
lowercase : Union[str, Any] =pad_token_id
lowercase : Union[str, Any] =position_biased_input
# Backwards compatibility
if type(UpperCAmelCase__ ) == str:
lowercase : Optional[int] =[x.strip() for x in pos_att_type.lower().split('''|''' )]
lowercase : List[str] =pos_att_type
lowercase : List[str] =vocab_size
lowercase : str =layer_norm_eps
lowercase : Optional[int] =kwargs.get('''pooler_hidden_size''' , UpperCAmelCase__ )
lowercase : Tuple =pooler_dropout
lowercase : Union[str, Any] =pooler_hidden_act
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase : Optional[Any] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : List[Any] ={0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return 12
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 40 , UpperCAmelCase__ : int = 40 , UpperCAmelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
lowercase : Dict =super().generate_dummy_inputs(preprocessor=UpperCAmelCase__ , framework=UpperCAmelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 88 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> bool:
lowercase : Optional[int] =first_str.lower().strip()
lowercase : Union[str, Any] =second_str.lower().strip()
# Remove whitespace
lowercase : Optional[int] =first_str.replace(''' ''' , '''''' )
lowercase : Optional[Any] =second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__magic_name__ ) != len(__magic_name__ ):
return False
# Default values for count should be 0
lowercase : defaultdict[str, int] =defaultdict(__magic_name__ )
# For each character position, increment the count for the character
# in the first string and decrement it for the character in the second
for i in range(len(__magic_name__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
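# Equivalent sketch using collections.Counter instead of a manual defaultdict
# (function names are mine; same normalisation as the code above):
from collections import Counter

def _normalize(s: str) -> str:
    return s.lower().strip().replace(" ", "")

def check_anagrams_counter(first: str, second: str) -> bool:
    return Counter(_normalize(first)) == Counter(_normalize(second))

assert check_anagrams_counter("Silent night", "Listen thing")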
| 88 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger()
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : str , __magic_name__ : LevitConfig , __magic_name__ : Path , __magic_name__ : bool = True ) -> Tuple:
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowercase : int =timm.create_model('''levit_128s''' , pretrained=__magic_name__ )
else:
lowercase : int =timm.create_model('''levit_128''' , pretrained=__magic_name__ )
if hidden_sizes == 192:
lowercase : Union[str, Any] =timm.create_model('''levit_192''' , pretrained=__magic_name__ )
if hidden_sizes == 256:
lowercase : int =timm.create_model('''levit_256''' , pretrained=__magic_name__ )
if hidden_sizes == 384:
lowercase : Dict =timm.create_model('''levit_384''' , pretrained=__magic_name__ )
from_model.eval()
lowercase : Any =LevitForImageClassificationWithTeacher(__magic_name__ ).eval()
lowercase : Any =OrderedDict()
lowercase : List[str] =from_model.state_dict()
lowercase : Dict =list(from_model.state_dict().keys() )
lowercase : str =list(our_model.state_dict().keys() )
print(len(__magic_name__ ) , len(__magic_name__ ) )
for i in range(len(__magic_name__ ) ):
lowercase : str =weights[og_keys[i]]
our_model.load_state_dict(__magic_name__ )
lowercase : Optional[int] =torch.randn((2, 3, 224, 224) )
lowercase : Tuple =from_model(__magic_name__ )
lowercase : str =our_model(__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ ), "The model logits don't match the original one."
lowercase : Tuple =name
print(__magic_name__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase : Union[str, Any] =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def _lowerCAmelCase ( __magic_name__ : Path , __magic_name__ : str = None , __magic_name__ : bool = True ) -> Tuple:
lowercase : List[Any] ='''imagenet-1k-id2label.json'''
lowercase : Optional[Any] =1000
lowercase : List[Any] =(1, num_labels)
lowercase : Dict ='''huggingface/label-files'''
lowercase : int =num_labels
lowercase : Optional[Any] =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase : Optional[Any] ={int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Dict =idalabel
lowercase : Tuple ={v: k for k, v in idalabel.items()}
lowercase : Tuple =partial(__magic_name__ , num_labels=__magic_name__ , idalabel=__magic_name__ , labelaid=__magic_name__ )
lowercase : int ={
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
lowercase : List[str] ={
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __magic_name__ , names_to_config[model_name] , __magic_name__ , __magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
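# Example invocation (folder name is illustrative; model names follow the
# names_to_config mapping above):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub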
| 88 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase : Optional[int] =None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
# any sequence length constraint. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 | 1 |
'''simple docstring'''
from math import ceil
def _lowerCAmelCase ( __magic_name__ : int = 1001 ) -> int:
lowercase : Any =1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowercase : Optional[Any] =2 * i + 1
lowercase : List[str] =2 * i
lowercase : Union[str, Any] =total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
UpperCamelCase_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 88 |
'''simple docstring'''
import math
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
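# Worked example (my numbers): a 100 VA load at power factor 0.8 gives
#   real power     = 100 * 0.8                 = 80.0 W
#   reactive power = 100 * sqrt(1 - 0.8 ** 2)  = 60.0 VAR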
| 88 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
UpperCamelCase_ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : str , __magic_name__ : Optional[int] ) -> list[str]:
lowercase : Any =set()
# keep track of all the paths to be checked
lowercase : List[Any] =[[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowercase : List[str] =queue.pop(0 )
# get the last node from the path
lowercase : str =path[-1]
if node not in explored:
lowercase : Optional[int] =graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowercase : int =list(__magic_name__ )
new_path.append(__magic_name__ )
queue.append(__magic_name__ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__magic_name__ )
# in case there's no path between the 2 nodes
return []
def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : Optional[int] , __magic_name__ : Any ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowercase : Optional[int] =[start]
lowercase : List[str] =set(__magic_name__ )
# Keep tab on distances from `start` node.
lowercase : int ={start: 0, target: -1}
while queue:
lowercase : Tuple =queue.pop(0 )
if node == target:
lowercase : str =(
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__magic_name__ )
queue.append(__magic_name__ )
lowercase : Any =dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
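# Performance note: list.pop(0) above is O(n) per dequeue; collections.deque
# pops from the left in O(1). A compact distance-only sketch (naming is mine):
from collections import deque

def bfs_distance_sketch(graph: dict, start: str, target: str) -> int:
    if start == target:
        return 0
    queue, seen = deque([(start, 0)]), {start}
    while queue:
        node, dist = queue.popleft()
        for neighbour in graph.get(node, []):
            if neighbour == target:
                return dist + 1
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append((neighbour, dist + 1))
    return -1  # target unreachable from start

# bfs_distance_sketch(demo_graph, "G", "D")  # -> 4, matching the print above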
| 88 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : int=[1, 1, 2, 1] , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : str=None , ):
'''simple docstring'''
lowercase : Dict =parent
lowercase : Optional[Any] =batch_size
lowercase : Optional[int] =image_size
lowercase : str =num_channels
lowercase : List[Any] =embeddings_size
lowercase : Tuple =hidden_sizes
lowercase : List[Any] =depths
lowercase : Optional[int] =is_training
lowercase : int =use_labels
lowercase : str =hidden_act
lowercase : int =num_labels
lowercase : Dict =scope
lowercase : Union[str, Any] =len(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Optional[Any] =None
if self.use_labels:
lowercase : List[str] =ids_tensor([self.batch_size] , self.num_labels )
lowercase : Optional[Any] =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Tuple =TFRegNetModel(config=UpperCAmelCase__ )
lowercase : List[str] =model(UpperCAmelCase__ , training=UpperCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Any =self.num_labels
lowercase : List[str] =TFRegNetForImageClassification(UpperCAmelCase__ )
lowercase : List[Any] =model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : int =config_and_inputs
lowercase : int ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =TFRegNetModelTester(self )
lowercase : Tuple =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : List[Any] =model_class(UpperCAmelCase__ )
lowercase : Optional[int] =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[Any] =[*signature.parameters.keys()]
lowercase : Any =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ):
lowercase : Any =model_class(UpperCAmelCase__ )
lowercase : Optional[Any] =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) , training=UpperCAmelCase__ )
lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : str =self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase , lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Dict =['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase : str =layer_type
lowercase : List[Any] =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Optional[int] =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int={} ):
lowercase : Optional[Any] =model(UpperCAmelCase__ , return_dict=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[str] =model(UpperCAmelCase__ , return_dict=UpperCAmelCase__ , **UpperCAmelCase__ ).to_tuple()
def recursive_check(UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict ):
if isinstance(UpperCAmelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(UpperCAmelCase__ , UpperCAmelCase__ ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )
for model_class in self.all_model_classes:
lowercase : Optional[Any] =model_class(UpperCAmelCase__ )
lowercase : int =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Any =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : str =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , {'''output_hidden_states''': True} )
lowercase : int =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : str =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , {'''output_hidden_states''': True} )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any =TFRegNetModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def _lowerCAmelCase ( ) -> str:
lowercase : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Optional[int] =TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase : Dict =self.default_image_processor
lowercase : str =prepare_img()
lowercase : Dict =image_processor(images=UpperCAmelCase__ , return_tensors='''tf''' )
# forward pass
lowercase : Optional[int] =model(**UpperCAmelCase__ , training=UpperCAmelCase__ )
# verify the logits
lowercase : Optional[Any] =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowercase : Optional[int] =tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 )
| 88 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : tuple , __magic_name__ : Path , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str]=False , ) -> Optional[int]:
output_path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__magic_name__ , __magic_name__ , f=output_path.as_posix() , input_names=__magic_name__ , output_names=__magic_name__ , dynamic_axes=__magic_name__ , do_constant_folding=__magic_name__ , use_external_data_format=__magic_name__ , enable_onnx_checker=__magic_name__ , opset_version=__magic_name__ , )
else:
export(
__magic_name__ , __magic_name__ , f=output_path.as_posix() , input_names=__magic_name__ , output_names=__magic_name__ , dynamic_axes=__magic_name__ , do_constant_folding=__magic_name__ , opset_version=__magic_name__ , )
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : bool = False ) -> List[str]:
lowercase : Optional[Any] =torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase : Tuple ='''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
lowercase : Tuple ='''cpu'''
lowercase : Dict =Path(__magic_name__ )
# VAE DECODER
lowercase : List[Any] =AutoencoderKL.from_pretrained(model_path + '''/vae''' )
lowercase : Any =vae_decoder.config.latent_channels
# forward only through the decoder part
lowercase : Dict =vae_decoder.decode
onnx_export(
__magic_name__ , model_args=(
torch.randn(1 , __magic_name__ , 25 , 25 ).to(device=__magic_name__ , dtype=__magic_name__ ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=__magic_name__ , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
UpperCamelCase_ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("""SD: Done: ONNX""")
| 88 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 88 | 1 |
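# Usage sketch: the benchmark above can also be driven programmatically instead of via
# the CLI. The model name and sizes are illustrative choices; note these benchmark
# utilities are deprecated in recent transformers releases.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=['bert-base-uncased'], batch_sizes=[1], sequence_lengths=[8, 32]
)
benchmark = TensorFlowBenchmark(args=benchmark_args)
results = benchmark.run()  # prints inference speed/memory tables and returns them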
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
        UpperCamelCase_ = GPT2LMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 88 |
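# Sanity-check sketch for the extraction script above: reload the dumped checkpoint
# and inspect a few tensors. The path is the script's default --dump_checkpoint value.
import torch

compressed_sd = torch.load('serialization_dir/tf_roberta_048131723.pth', map_location='cpu')
print(f'{len(compressed_sd)} tensors in the transferred checkpoint')
for name, tensor in list(compressed_sd.items())[:5]:
    print(name, tuple(tensor.shape))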
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
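# Illustrative driver for the backtracking solver above (0 = open cell, 1 = wall);
# the maze below is an assumed example, not part of the original module.
demo_maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
solve_maze(demo_maze)  # prints the 0/1 path matrix from (0, 0) to (3, 3)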
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCamelCase_ = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return 12
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 12
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Tuple =VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCAmelCase__ )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Tuple =12
lowercase : Optional[Any] =12
lowercase : Tuple ={
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
        lowercase : List[Any] =Transformer2DModel(**UpperCAmelCase__ )
return model
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : int ='''cpu'''
lowercase : List[str] =self.dummy_vqvae
lowercase : Optional[Any] =self.dummy_text_encoder
lowercase : List[str] =self.dummy_tokenizer
lowercase : List[Any] =self.dummy_transformer
lowercase : str =VQDiffusionScheduler(self.num_embed )
lowercase : Optional[int] =LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase__ )
lowercase : str =VQDiffusionPipeline(
vqvae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , transformer=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , learned_classifier_free_sampling_embeddings=UpperCAmelCase__ , )
lowercase : Dict =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple ='''teddy bear playing in the pool'''
lowercase : Any =torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
lowercase : Dict =pipe([prompt] , generator=UpperCAmelCase__ , num_inference_steps=2 , output_type='''np''' )
lowercase : List[str] =output.images
lowercase : int =torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
lowercase : List[Any] =pipe(
[prompt] , generator=UpperCAmelCase__ , output_type='''np''' , return_dict=UpperCAmelCase__ , num_inference_steps=2 )[0]
lowercase : Optional[Any] =image[0, -3:, -3:, -1]
lowercase : int =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase : str =np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Dict ='''cpu'''
lowercase : Any =self.dummy_vqvae
lowercase : List[str] =self.dummy_text_encoder
lowercase : int =self.dummy_tokenizer
lowercase : str =self.dummy_transformer
lowercase : Dict =VQDiffusionScheduler(self.num_embed )
lowercase : Dict =LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowercase : Dict =VQDiffusionPipeline(
vqvae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , transformer=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , learned_classifier_free_sampling_embeddings=UpperCAmelCase__ , )
lowercase : int =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] ='''teddy bear playing in the pool'''
lowercase : Optional[int] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
lowercase : Optional[int] =pipe([prompt] , generator=UpperCAmelCase__ , num_inference_steps=2 , output_type='''np''' )
lowercase : Any =output.images
lowercase : Optional[int] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
lowercase : Dict =pipe(
[prompt] , generator=UpperCAmelCase__ , output_type='''np''' , return_dict=UpperCAmelCase__ , num_inference_steps=2 )[0]
lowercase : Optional[Any] =image[0, -3:, -3:, -1]
lowercase : List[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase : List[Any] =np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
lowercase : Optional[Any] =VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
lowercase : int =pipeline.to(UpperCAmelCase__ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowercase : int =torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
lowercase : Optional[int] =pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=UpperCAmelCase__ , output_type='''np''' , )
lowercase : Optional[Any] =output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 88 |
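# Usage sketch for the pipeline exercised by the tests above; this downloads the real
# weights from the Hub, so it is illustrative rather than part of the test suite.
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
pipe = pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
image = pipe('teddy bear playing in the pool', num_inference_steps=50).images[0]
image.save('teddy_bear.png')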
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Any =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ):
lowercase : Optional[int] =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : Dict =self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 88 | 1 |
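# Usage sketch for the DDIM pipeline defined above; the checkpoint name is an
# illustrative unconditional DDPM model, not mandated by the source.
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
# eta=0.0 gives deterministic DDIM sampling; far fewer steps than DDPM's 1000 suffice.
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save('ddim_sample.png')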
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = MODEL_FOR_MASKED_LM_MAPPING
lowerCamelCase_ = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
lowercase : Tuple =unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 38015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 25506, '''token_str''': ''' accuser'''},
] , )
lowercase : Tuple =unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 38015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 25506,
'''token_str''': ''' accuser''',
},
] , )
lowercase : int =unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
lowercase : str =unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 35676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
lowercase : Tuple =unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
lowercase : Optional[Any] =unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
] , )
lowercase : Any =unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Any =pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
lowercase : Optional[Any] =pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_torch
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Any =pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(UpperCAmelCase__ )
@slow
@require_tf
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : str =pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[str] =unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_08, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_07, '''token''': 1573, '''token_str''': ''' Chris'''},
] , )
lowercase : int =unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_51,
'''token''': 2201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_14,
'''token''': 12790,
'''token_str''': ''' Lyon''',
},
] , )
lowercase : Union[str, Any] =unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_00, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_00, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
lowercase : List[str] =None
lowercase : Optional[int] =None
self.run_pipeline_test(UpperCAmelCase__ , [] )
@require_tf
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Dict =pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
lowercase : Any =None
lowercase : Any =None
self.run_pipeline_test(UpperCAmelCase__ , [] )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
lowercase : Tuple =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowercase : str =[
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =fill_masker.tokenizer
lowercase : List[str] =fill_masker.model
lowercase : str =fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
UpperCAmelCase__ , [
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
] , )
lowercase : str =fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
UpperCAmelCase__ , [
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
] , )
lowercase : Optional[int] =fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
UpperCAmelCase__ , [
[
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
],
[
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
],
] , )
with self.assertRaises(UpperCAmelCase__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(UpperCAmelCase__ ):
fill_masker('''This is''' )
self.run_test_top_k(UpperCAmelCase__ , UpperCAmelCase__ )
self.run_test_targets(UpperCAmelCase__ , UpperCAmelCase__ )
self.run_test_top_k_targets(UpperCAmelCase__ , UpperCAmelCase__ )
self.fill_mask_with_duplicate_targets_and_top_k(UpperCAmelCase__ , UpperCAmelCase__ )
self.fill_mask_with_multiple_masks(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : int =tokenizer.get_vocab()
lowercase : Optional[int] =sorted(vocab.keys() )[:2]
# Pipeline argument
lowercase : Optional[int] =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , targets=UpperCAmelCase__ )
lowercase : str =fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
UpperCAmelCase__ , [
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
] , )
lowercase : Optional[Any] ={vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , UpperCAmelCase__ )
lowercase : Dict =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(UpperCAmelCase__ ) )
# Call argument
lowercase : Optional[Any] =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowercase : Any =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCAmelCase__ )
self.assertEqual(
UpperCAmelCase__ , [
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
] , )
lowercase : Any ={vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , UpperCAmelCase__ )
lowercase : List[str] =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(UpperCAmelCase__ ) )
# Score equivalence
lowercase : int =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCAmelCase__ )
lowercase : List[str] =[top_mask['''token_str'''] for top_mask in outputs]
lowercase : Tuple =[top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCAmelCase__ ) == set(UpperCAmelCase__ ):
lowercase : List[str] =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCAmelCase__ )
lowercase : List[str] =[top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) )
# Raises with invalid
with self.assertRaises(UpperCAmelCase__ ):
lowercase : Any =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(UpperCAmelCase__ ):
lowercase : Dict =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
with self.assertRaises(UpperCAmelCase__ ):
lowercase : Union[str, Any] =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='''''' )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Any =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , top_k=2 )
lowercase : Dict =fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
UpperCAmelCase__ , [
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
] , )
lowercase : Dict =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowercase : int =fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
] , )
self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =tokenizer.get_vocab()
lowercase : int =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
# top_k=2, ntargets=3
lowercase : Dict =sorted(vocab.keys() )[:3]
lowercase : str =fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=UpperCAmelCase__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        lowercase : Tuple =[el['''token_str'''] for el in sorted(UpperCAmelCase__ , key=lambda x : x["score"] , reverse=UpperCAmelCase__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCAmelCase__ ).issubset(UpperCAmelCase__ ):
lowercase : Any =fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=UpperCAmelCase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Optional[int] =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.get_vocab()
# String duplicates + id duplicates
lowercase : Union[str, Any] =sorted(vocab.keys() )[:3]
lowercase : List[Any] =[targets[0], targets[1], targets[0], targets[2], targets[1]]
lowercase : Union[str, Any] =fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=UpperCAmelCase__ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(UpperCAmelCase__ ) , 3 )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Dict =FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowercase : Optional[int] =fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [
[
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
],
[
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
],
[
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
{'''sequence''': ANY(UpperCAmelCase__ ), '''score''': ANY(UpperCAmelCase__ ), '''token''': ANY(UpperCAmelCase__ ), '''token_str''': ANY(UpperCAmelCase__ )},
],
] , )
| 88 |
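# Usage sketch of the fill-mask pipeline under test above, with a real checkpoint.
from transformers import pipeline

unmasker = pipeline('fill-mask', model='distilroberta-base')
for pred in unmasker('The largest city in France is <mask>.', top_k=2):
    print(f"{pred['token_str']!r}: {pred['score']:.3f}")
# targets=[...] restricts candidates and top_k caps them, as the tests above exercise.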
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 88 | 1 |
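# Illustrative input for the tabu-search script above, inferred from how
# generate_neighbours parses it: one 'node_a node_b distance' edge per line, with the
# first character of the file taken as the start node. The file name is hypothetical.
edges = '''a b 20
a c 18
a d 30
b c 10
b d 25
c d 22
'''
with open('tabu_demo.txt', 'w') as f:
    f.write(edges)
# Then run: python tabu_search.py -f tabu_demo.txt -i 3 -s 3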
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ) -> List[Any]:
lowercase : List[Any] =1.5
lowercase : Optional[int] =int(factor * num_class_images )
lowercase : List[Any] =ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''' , exist_ok=__magic_name__ )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : Tuple =client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowercase : Dict =int(factor * num_images )
lowercase : Optional[Any] =ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : str =0
lowercase : Union[str, Any] =0
lowercase : int =tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(f'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(f'''{class_data_dir}/urls.txt''' , '''w''' ) as fa, open(
f'''{class_data_dir}/images.txt''' , '''w''' ) as fa:
while total < num_class_images:
lowercase : Union[str, Any] =class_images[count]
count += 1
try:
lowercase : Tuple =requests.get(images['''url'''] )
if img.status_code == 200:
lowercase : Any =Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _lowerCAmelCase ( ) -> Union[str, Any]:
lowercase : List[Any] =argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 88 |
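# Post-run check sketch for the retrieval script above: every saved image should have
# a matching caption and URL line. The directory is a hypothetical --class_data_dir.
from pathlib import Path

class_data_dir = Path('./class_data')
n_images = len(list((class_data_dir / 'images').glob('*.jpg')))
n_captions = len((class_data_dir / 'caption.txt').read_text().splitlines())
n_urls = len((class_data_dir / 'urls.txt').read_text().splitlines())
assert n_images == n_captions == n_urls, (n_images, n_captions, n_urls)
print(f'{n_images} regularization images ready')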
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 | 1 |
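# Cross-check sketch: the sieve above computes Euler's totient via the product
# phi(n) = n * prod(1 - 1/p) over primes p dividing n; a direct gcd count must agree.
# sum(phi(n) for 2 <= n <= 8) == 21, the count of reduced proper fractions with d <= 8.
from math import gcd

def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

assert sum(phi_naive(n) for n in range(2, 9)) == 21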
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : List[str] =True
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ )
lowercase : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 88 |
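# Usage sketch for the Flax models tested above, with the real roberta-base weights.
from transformers import AutoTokenizer, FlaxRobertaModel

tokenizer = AutoTokenizer.from_pretrained('roberta-base')
model = FlaxRobertaModel.from_pretrained('roberta-base')
inputs = tokenizer('Hello, world!', return_tensors='np')  # Flax models consume numpy arrays
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)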
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 88 | 1 |
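# Usage sketch for the tokenizer tested above, with the real BioGPT vocabulary;
# BPE splits out-of-vocabulary words into subword units ending in '</w>'.
from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
tokens = tokenizer.tokenize('covid infection')
print(tokens)
print(tokenizer.convert_tokens_to_ids(tokens))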
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCamelCase_ = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def _lowerCAmelCase ( __magic_name__ : Dict=None ) -> int:
if subparsers is not None:
lowercase : Dict =subparsers.add_parser('''tpu-config''' , description=_description )
else:
lowercase : str =argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
lowercase : List[str] =parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=__magic_name__ , default=__magic_name__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=__magic_name__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=__magic_name__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
lowercase : Union[str, Any] =parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=__magic_name__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Any:
lowercase : Optional[int] =None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__magic_name__ ):
lowercase : int =load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowercase : str =defaults.command_file
if not args.command and defaults.commands is not None:
lowercase : Optional[int] =defaults.commands
if not args.tpu_name:
lowercase : List[str] =defaults.tpu_name
if not args.tpu_zone:
lowercase : Tuple =defaults.tpu_zone
if args.accelerate_version == "dev":
lowercase : Tuple ='''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
lowercase : Optional[Any] ='''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , __magic_name__ ):
lowercase : Optional[Any] =f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
lowercase : Any =[f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __magic_name__ ):
lowercase : List[str] =[line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowercase : str =['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
lowercase : List[str] ='''; '''.join(__magic_name__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowercase : Optional[Any] =['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(__magic_name__ )}''' )
return
subprocess.run(__magic_name__ )
print('''Successfully setup pod.''' )
def _lowerCAmelCase ( ) -> Union[str, Any]:
lowercase : Optional[int] =tpu_command_parser()
lowercase : List[Any] =parser.parse_args()
tpu_command_launcher(__magic_name__ )
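# Hedged usage sketch (TPU name, zone, and command are made up; the flags are
# the ones registered on the parser above):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug
# With --debug set, the assembled `gcloud compute tpus tpu-vm ssh ...` command
# is printed instead of executed.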
| 88 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
        # Iterative butterfly passes: combine row pairs with twiddle factors,
        # halving next_ncol until the full-length DFT remains in dft[0]
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
'''simple docstring'''
        lowercase : Any ='''A = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        lowercase : Tuple ='''B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
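    # Hedged usage sketch (the class name is mangled above; FFT, the name used
    # in the original algorithm collection, is assumed). Multiplying
    # (1 + 2x + 3x^2) by (3 + 4x):
    #   fft = FFT([1, 2, 3], [3, 4])
    #   fft.product  # -> 3 + 10x + 17x^2 + 12x^3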
| 88 | 1 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
return getitem, k
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Optional[Any]:
return setitem, k, v
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> Optional[int]:
return delitem, k
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Any , *__magic_name__ : List[str] ) -> Dict:
try:
return fun(__magic_name__ , *__magic_name__ ), None
except Exception as e:
return None, e
UpperCamelCase_ = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
UpperCamelCase_ = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
UpperCamelCase_ = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
UpperCamelCase_ = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
UpperCamelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCamelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Tuple:
lowercase : List[str] =HashMap(initial_block_size=4 )
lowercase : Tuple ={}
for _, (fun, *args) in enumerate(__magic_name__ ):
lowercase , lowercase : List[str] =_run_operation(__magic_name__ , __magic_name__ , *__magic_name__ )
lowercase , lowercase : Tuple =_run_operation(__magic_name__ , __magic_name__ , *__magic_name__ )
assert my_res == py_res
assert str(__magic_name__ ) == str(__magic_name__ )
assert set(__magic_name__ ) == set(__magic_name__ )
assert len(__magic_name__ ) == len(__magic_name__ )
assert set(my.items() ) == set(py.items() )
def _lowerCAmelCase ( ) -> List[str]:
def is_public(__magic_name__ : str ) -> bool:
return not name.startswith('''_''' )
lowercase : Any ={name for name in dir({} ) if is_public(__magic_name__ )}
lowercase : int ={name for name in dir(HashMap() ) if is_public(__magic_name__ )}
assert dict_public_names > hash_public_names
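# Note on the pattern: these tests are differential -- each operation sequence
# is replayed against both HashMap and the built-in dict, and the results,
# string forms, key sets, lengths, and items must agree at every step; the
# last assertion additionally requires dict's public API to be a strict
# superset of HashMap's.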
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
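    # Design note: _LazyModule defers the heavy torch/sentencepiece imports
    # until an attribute such as PLBartModel is first accessed (in the
    # unmangled original this object replaces the module in sys.modules);
    # the TYPE_CHECKING branch above still hands static analyzers the real
    # symbols.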
| 88 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'vision-encoder-decoder'
lowerCamelCase_ = True
def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                F'''A configuration of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
lowercase : Optional[Any] =kwargs.pop('''encoder''' )
lowercase : List[Any] =encoder_config.pop('''model_type''' )
lowercase : List[str] =kwargs.pop('''decoder''' )
lowercase : Dict =decoder_config.pop('''model_type''' )
lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : str =True
@classmethod
def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowercase : int =True
lowercase : Optional[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =copy.deepcopy(self.__dict__ )
lowercase : Union[str, Any] =self.encoder.to_dict()
lowercase : Union[str, Any] =self.decoder.to_dict()
lowercase : int =self.__class__.model_type
return output
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =OrderedDict()
lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
lowercase : Optional[Any] =OrderedDict()
lowercase : List[Any] =super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
lowercase , lowercase : Optional[int] =dummy_input['''input_ids'''].shape
lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase : List[str] =dummy_input.pop('''input_ids''' )
lowercase : Tuple =dummy_input.pop('''attention_mask''' )
lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ )
return common_inputs
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ):
'''simple docstring'''
lowercase : List[Any] =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def lowerCamelCase_ ( *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
pass
def _lowerCAmelCase ( __magic_name__ : Image ) -> str:
lowercase : Tuple =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Any =DepthEstimationPipeline(model=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , UpperCAmelCase__ )
import datasets
lowercase : Any =datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
lowercase : Dict =depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , UpperCAmelCase__ , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Tuple ='''Intel/dpt-large'''
lowercase : Union[str, Any] =pipeline('''depth-estimation''' , model=UpperCAmelCase__ )
lowercase : Any =depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
lowercase : Union[str, Any] =hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.3_04 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_62 )
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
        # It is highly irregular to have no small tests.
        self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
| 88 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> Any:
lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
lowercase : Union[str, Any] =parser.parse_args()
return args
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]:
try:
if args.tpu_name:
lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(__magic_name__ )
tf.tpu.experimental.initialize_tpu_system(__magic_name__ )
return tpu
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]:
lowercase : str =0
for file in file_list:
lowercase : List[str] =file.split('''/''' )[-1]
lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 )
lowercase : int =int(__magic_name__ )
num_samples += sample_count
return num_samples
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str:
lowercase : int =count_samples(__magic_name__ )
lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ )
if shuffle:
lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) )
lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) )
lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase : int =dataset.shuffle(args.shuffle_buffer_size )
lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ )
lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ )
return dataset
def _lowerCAmelCase ( __magic_name__ : Any ) -> str:
if not args.no_tpu:
lowercase : Optional[Any] =initialize_tpu(__magic_name__ )
lowercase : Any =tf.distribute.TPUStrategy(__magic_name__ )
else:
lowercase : Optional[Any] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
lowercase : Any =AutoTokenizer.from_pretrained(args.tokenizer )
lowercase : Union[str, Any] =AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase : Optional[Any] =tokenizer.vocab_size
lowercase : str =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
lowercase : Optional[int] =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
lowercase : Any =count_samples(__magic_name__ )
lowercase : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase : Union[str, Any] =steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase : List[Any] =TFAutoModelForMaskedLM.from_config(__magic_name__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase : Dict =create_optimizer(
num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__magic_name__ , metrics=['''accuracy'''] )
def decode_fn(__magic_name__ : Optional[Any] ):
lowercase : Union[str, Any] ={
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__magic_name__ , __magic_name__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase : str =DataCollatorForLanguageModeling(
tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='''tf''' )
def mask_with_collator(__magic_name__ : Dict ):
# TF really needs an isin() function
lowercase : int =(
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
lowercase , lowercase : Union[str, Any] =data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , )
return batch
lowercase : List[str] =args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase : Dict =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase : Union[str, Any] =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , )
lowercase : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) )
model.fit(
__magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
main(args)
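# Hedged usage sketch (the script file name, bucket paths, and tokenizer ID
# are made up; the flags are the ones defined in parse_args above):
#   python run_mlm_tpu.py --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base --output_dir ./checkpoints \
#       --bfloat16 --num_epochs 1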
| 88 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
UpperCamelCase_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
UpperCamelCase_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
UpperCamelCase_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[str]=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )["wer"]
else:
lowercase : Optional[int] =0
lowercase : int =0
for prediction, reference in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Any =compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 88 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> Any:
# Load configuration defined in the metadata file
with open(__magic_name__ ) as metadata_file:
lowercase : Union[str, Any] =json.load(__magic_name__ )
lowercase : int =LukeConfig(use_entity_aware_attention=__magic_name__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
lowercase : Optional[int] =torch.load(__magic_name__ , map_location='''cpu''' )
# Load the entity vocab file
lowercase : Optional[Any] =load_entity_vocab(__magic_name__ )
lowercase : Optional[Any] =RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase : int =AddedToken('''<ent>''' , lstrip=__magic_name__ , rstrip=__magic_name__ )
lowercase : Optional[Any] =AddedToken('''<ent2>''' , lstrip=__magic_name__ , rstrip=__magic_name__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowercase : str =LukeTokenizer.from_pretrained(__magic_name__ )
# Initialize the embeddings of the special tokens
lowercase : List[str] =state_dict['''embeddings.word_embeddings.weight''']
lowercase : Union[str, Any] =word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
lowercase : int =word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
lowercase : List[Any] =torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase : Optional[Any] =f'''encoder.layer.{layer_index}.attention.self.'''
lowercase : Tuple =state_dict[prefix + matrix_name]
lowercase : Tuple =state_dict[prefix + matrix_name]
lowercase : Union[str, Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase : Optional[int] =state_dict['''entity_embeddings.entity_embeddings.weight''']
lowercase : Dict =entity_emb[entity_vocab['''[MASK]''']]
lowercase : Optional[Any] =LukeModel(config=__magic_name__ ).eval()
lowercase , lowercase : List[Any] =model.load_state_dict(__magic_name__ , strict=__magic_name__ )
if not (len(__magic_name__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(__magic_name__ )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
lowercase : Optional[int] =LukeTokenizer.from_pretrained(__magic_name__ , task='''entity_classification''' )
lowercase : Tuple =(
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
lowercase : Optional[int] =(39, 42)
lowercase : int =tokenizer(__magic_name__ , entity_spans=[span] , add_prefix_space=__magic_name__ , return_tensors='''pt''' )
lowercase : Any =model(**__magic_name__ )
# Verify word hidden states
if model_size == "large":
lowercase : str =torch.Size((1, 42, 1024) )
lowercase : List[Any] =torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
lowercase : List[Any] =torch.Size((1, 42, 768) )
lowercase : List[str] =torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowercase : Dict =torch.Size((1, 1, 1024) )
lowercase : int =torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
lowercase : Dict =torch.Size((1, 1, 768) )
lowercase : Union[str, Any] =torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__magic_name__ ) )
model.save_pretrained(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : str ) -> Any:
lowercase : Dict ={}
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
lowercase , lowercase : str =line.rstrip().split('''\t''' )
lowercase : int =index
return entity_vocab
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
UpperCamelCase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
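# Hedged usage sketch (the script file name and paths are made up; the flags
# are the ones defined in the argparse setup above):
#   python convert_luke_checkpoint.py --checkpoint_path pytorch_model.bin \
#       --metadata_path metadata.json --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base --model_size base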
| 88 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[Any] =7
lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Union[str, Any] =1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : str =len(self.sp_model )
lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase__ )
lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
lowercase : Optional[int] =self.__dict__.copy()
lowercase : List[Any] =None
lowercase : Tuple =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Optional[int] ={}
lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : List[Any] =[self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : int =[self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 88 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : int , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : str , ):
'''simple docstring'''
lowercase : str =torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCAmelCase__ , )
lowercase : Dict =image.to(self.device )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Tuple =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : Union[str, Any] =self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
lowercase : List[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : Optional[Any] =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=UpperCAmelCase__ ), "This is a local test"
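# Hedged usage sketch (the pipeline class name and keyword names are mangled
# above; in the unmangled original the call signature is assumed to be
# (batch_size=1, generator=None, num_inference_steps=50, output_type="pil",
# return_dict=True)):
#   pipe = <PipelineClass>(unet, scheduler)
#   output, message = pipe(1, None, 50)  # message == "This is a local test"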
| 88 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _lowerCAmelCase ( __magic_name__ : str ) -> Union[str, Any]:
lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase : List[str] =json.loads(open(__magic_name__ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
lowercase : Tuple =args.output + '''.pt'''
lowercase : int =OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir )
lowercase : int =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase : int =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase : Union[str, Any] =8
            lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # feeds an nn.Sequential with Tanh, so 2 at a time
lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/moe''' ):
lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase : Union[str, Any] =key_name[-9:-7]
for i in range(16 ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase : Any =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/mlp''' ):
lowercase : Dict =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Any =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p1/bias''' ):
lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/kernel''' ):
lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/bias''' ):
lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/ln''' ):
lowercase : int =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/att''' ):
lowercase : int =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Dict =state[:, 0, :, :]
lowercase : Tuple =state[:, 1, :, :]
lowercase : List[Any] =state[:, 2, :, :]
lowercase : Optional[int] =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase : Dict =torch.tensor(__magic_name__ )
lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase : Tuple =torch.tensor(__magic_name__ )
elif key_name.endswith('''/o/kernel''' ):
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase : List[Any] =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-TensorFlow stores kernels as (in, out); PyTorch expects (out, in)
lowercase : str =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/an''' ):
lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase : Optional[Any] ='''model.%s.weight''' % nlayer
lowercase : Optional[int] =vnp.copy() # embedding weights are kept as-is
lowercase : List[Any] =torch.tensor(__magic_name__ )
if key_name.startswith('''model/wte''' ):
lowercase : Tuple ='''lm_head.weight'''
lowercase : str =vnp.copy() # embedding weights are kept as-is
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/wob''' ):
lowercase : List[str] ='''final_logits_bias'''
lowercase : Dict =vnp.copy() # bias is kept as-is
lowercase : Tuple =state.reshape((1, -1) )
lowercase : Dict =torch.tensor(__magic_name__ )
elif key_name == "model/dense/kernel":
lowercase : Dict ='''model.last_project.weight'''
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow stores kernels as (in, out); PyTorch expects (out, in)
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name == "model/dense_1/bias":
lowercase : List[Any] ='''model.last_project.bias'''
lowercase : str =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
torch.save(__magic_name__ , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
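# A hedged invocation sketch (added for illustration; the script name and paths
# below are hypothetical, only the flags come from the argparse setup above):
#
#   python this_converter_script.py --tf_model_dir ./mesh_tf_checkpoint --output ./pytorch_model.pt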
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] | None = None ) -> list[list[str]]:
lowercase : Any =word_bank or []
# create a table
lowercase : int =len(__magic_name__ ) + 1
lowercase : list[list[list[str]]] =[]
for _ in range(__magic_name__ ):
table.append([] )
# seed value
lowercase : int =[[]] # the empty string has exactly one (empty) combination
# iterate through the indices
for i in range(__magic_name__ ):
# only positions that are already constructible can be extended
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__magic_name__ )] == word:
lowercase : list[list[str]] =[
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(__magic_name__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__magic_name__ )]:
combination.reverse()
return table[len(__magic_name__ )]
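# A hedged worked example of the table-based DP above (illustrative only): for
# target "ab" and word_bank ["a", "b", "ab"], table[0] seeds with [[]]; at i=0
# the word "a" extends table[1] with [["a"]] and "ab" extends table[2] with
# [["ab"]]; at i=1 the word "b" extends table[2] with [["b", "a"]]. After the
# per-combination reverse, the call returns [["ab"], ["a", "b"]].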
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 88 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = BigBirdTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Any =self.get_rust_tokenizer()
lowercase : int ='''I was born in 92000, and this is falsé.'''
lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ )
lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_rust_tokenizer()
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''Hello World!'''
lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : Dict =''' '''.join(UpperCAmelCase__ )
lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' )
lowercase : Dict =BigBirdModel(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 88 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['image_processor', 'tokenizer']
lowerCamelCase_ = 'Pix2StructImageProcessor'
lowerCamelCase_ = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =False
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __call__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = 2048 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase : Optional[Any] =self.tokenizer
lowercase : str =self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase : List[str] =self.image_processor(
UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , max_patches=UpperCAmelCase__ , **UpperCAmelCase__ )
else:
# add pixel_values and bbox
lowercase : Optional[int] =self.image_processor(
UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None and not self.image_processor.is_vqa:
lowercase : Union[str, Any] =self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
if "attention_mask" in text_encoding:
lowercase : List[Any] =text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
lowercase : Union[str, Any] =text_encoding.pop('''input_ids''' )
else:
lowercase : Tuple =None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase__ )
return encoding_image_processor
def lowerCamelCase_ ( self : Tuple , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =self.tokenizer.model_input_names
lowercase : int =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 88 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] ) -> str:
lowercase : Optional[Any] =[0 for i in range(r + 1 )]
# nc0 = 1
lowercase : Optional[Any] =1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowercase : str =min(__magic_name__ , __magic_name__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
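# A hedged worked example (illustrative only): the loop builds one row of
# Pascal's triangle in place, updating right-to-left so each entry still holds
# the previous row's value when read. For n=5, r=2 the row evolves
# [1, 0, 0] -> [1, 1, 0] -> [1, 2, 1] -> [1, 3, 3] -> [1, 4, 6] -> [1, 5, 10],
# returning 10 = C(5, 2); the demo call below returns C(10, 5) = 252.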
print(binomial_coefficient(n=10, r=5))
| 88 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : int =TextaTextGenerationPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =generator('''Something there''' )
self.assertEqual(UpperCAmelCase__ , [{'''generated_text''': ANY(UpperCAmelCase__ )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
lowercase : Optional[Any] =generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=UpperCAmelCase__ )
self.assertEqual(
UpperCAmelCase__ , [
[{'''generated_text''': ANY(UpperCAmelCase__ )}, {'''generated_text''': ANY(UpperCAmelCase__ )}],
[{'''generated_text''': ANY(UpperCAmelCase__ )}, {'''generated_text''': ANY(UpperCAmelCase__ )}],
] , )
lowercase : Tuple =generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCAmelCase__ )
self.assertEqual(
UpperCAmelCase__ , [
[{'''generated_text''': ANY(UpperCAmelCase__ )}, {'''generated_text''': ANY(UpperCAmelCase__ )}],
[{'''generated_text''': ANY(UpperCAmelCase__ )}, {'''generated_text''': ANY(UpperCAmelCase__ )}],
] , )
with self.assertRaises(UpperCAmelCase__ ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : str =pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
lowercase : int =generator('''Something there''' , do_sample=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [{'''generated_text''': ''''''}] )
lowercase : Any =3
lowercase : Optional[Any] =generator(
'''Something there''' , num_return_sequences=UpperCAmelCase__ , num_beams=UpperCAmelCase__ , )
lowercase : Optional[int] =[
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =generator('''This is a test''' , do_sample=UpperCAmelCase__ , num_return_sequences=2 , return_tensors=UpperCAmelCase__ )
self.assertEqual(
UpperCAmelCase__ , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
lowercase : Any =generator.model.config.eos_token_id
lowercase : Optional[int] ='''<pad>'''
lowercase : Tuple =generator(
['''This is a test''', '''This is a second test'''] , do_sample=UpperCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCAmelCase__ , )
self.assertEqual(
UpperCAmelCase__ , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : List[str] =pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
lowercase : List[str] =generator('''Something there''' , do_sample=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [{'''generated_text''': ''''''}] )
| 88 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> bool:
lowercase : Optional[int] =first_str.lower().strip()
lowercase : Union[str, Any] =second_str.lower().strip()
# Remove whitespace
lowercase : Optional[int] =first_str.replace(''' ''' , '''''' )
lowercase : Optional[Any] =second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__magic_name__ ) != len(__magic_name__ ):
return False
# Default values for count should be 0
lowercase : defaultdict[str, int] =defaultdict(__magic_name__ )
# For each character in the input strings, increment the counter for the
# character in the first string and decrement it for the second
for i in range(len(__magic_name__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
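# A hedged worked example (illustrative only): for "listen" and "silent" every
# letter is incremented once and decremented once, so all counters end at 0 and
# the function returns True; for "rat" and "car" the counters for 't' and 'c'
# stay nonzero, so it returns False.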
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 88 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase : Optional[int] =None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
# any sequence length constraint. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 10**9 ) -> int:
lowercase : Optional[Any] =1
lowercase : Tuple =2
lowercase : Tuple =0
lowercase : Any =0
lowercase : List[Any] =0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowercase : str =2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
'''simple docstring'''
import math
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
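# A hedged worked example (illustrative only): for an apparent power of 100 VA
# at power_factor 0.8, the real power is 100 * 0.8 = 80 W and the reactive
# power is 100 * sqrt(1 - 0.8**2) = 100 * 0.6 = 60 var (the classic 3-4-5
# power triangle).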
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : Union[str, Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Union[str, Any] =do_rescale
lowercase : Any =rescale_factor
lowercase : Optional[Any] =do_pad
lowercase : Dict =pad_size
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase , lowercase : Optional[int] =get_image_size(UpperCAmelCase__ )
lowercase : str =(old_height // size + 1) * size - old_height
lowercase : List[str] =(old_width // size + 1) * size - old_width
return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ )
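# A hedged shape example for the padding rule above (illustrative only): for an
# image of height 500 and pad size 8, pad_height = (500 // 8 + 1) * 8 - 500 =
# 504 - 500 = 4. Note the formula always pads: a dimension that is already a
# multiple of size still gains a full extra block of `size` pixels.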
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ):
'''simple docstring'''
lowercase : str =do_rescale if do_rescale is not None else self.do_rescale
lowercase : Optional[int] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_pad if do_pad is not None else self.do_pad
lowercase : List[str] =pad_size if pad_size is not None else self.pad_size
lowercase : Tuple =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase : Optional[int] =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : Dict =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_pad:
lowercase : List[str] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
lowercase : Any =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Union[str, Any] ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> bool:
lowercase : Optional[int] =first_str.lower().strip()
lowercase : Union[str, Any] =second_str.lower().strip()
# Remove whitespace
lowercase : Optional[int] =first_str.replace(''' ''' , '''''' )
lowercase : Optional[Any] =second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__magic_name__ ) != len(__magic_name__ ):
return False
# Default values for count should be 0
lowercase : defaultdict[str, int] =defaultdict(__magic_name__ )
# For each character in the input strings, increment the counter for the
# character in the first string and decrement it for the second
for i in range(len(__magic_name__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 88 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
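# A hedged invocation sketch (the script name and output path are hypothetical;
# the flags come from the argparse setup above):
#
#   python this_extraction_script.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint ./distil_init.pth --vocab_transform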
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : Optional[Any] =0
# if input_string is "aba" than new_input_string become "a|b|a"
lowercase : Tuple =''''''
lowercase : Optional[int] =''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__magic_name__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# store the start (l) and end (r) of the previously found furthest-ending
# palindromic substring
lowercase , lowercase : List[Any] =0, 0
# length[i] shows the length of palindromic substring with center i
lowercase : Optional[Any] =[1 for i in range(len(__magic_name__ ) )]
# for each center in new_input_string, find the corresponding palindromic substring
lowercase : Any =0
for j in range(len(__magic_name__ ) ):
lowercase : Union[str, Any] =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__magic_name__ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase : str =2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update l and r to the bounds of this palindrome
if j + k - 1 > r:
lowercase : List[str] =j - k + 1 # noqa: E741
lowercase : Any =j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase : int =length[j]
lowercase : Tuple =j
# create that string
lowercase : str =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
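# A hedged trace of the Manacher-style scan above (illustrative only): for
# input "aba" the transformed string is "a|b|a"; at center j=2 the radius
# grows to k=3, so length[2] = 2*3 - 1 = 5, the slice covers the whole string,
# and stripping the '|' separators returns "aba". The separators let a single
# scan handle even- and odd-length palindromes uniformly.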
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Tuple =HfArgumentParser(__magic_name__ )
lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0]
lowercase : Any =TensorFlowBenchmark(args=__magic_name__ )
try:
lowercase : List[Any] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] )
lowercase : Optional[Any] =''''''
lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] )
lowercase : Optional[Any] =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ )
raise ValueError(__magic_name__ )
benchmark.run()
if __name__ == "__main__":
main()
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str:
lowercase : List[str] =''''''
for word_or_phrase in separated:
if not isinstance(__magic_name__ , __magic_name__ ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(__magic_name__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 88 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool:
lowercase : str =len(__magic_name__ )
# We need to create a solutions matrix to save the path.
lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ )
if solved:
print('''\n'''.join(str(__magic_name__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool:
lowercase : Optional[int] =len(__magic_name__ )
# Final check point.
if i == j == (size - 1):
lowercase : Optional[int] =1
return True
lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds
lowercase : Tuple =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already-visited and blocked cells.
lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# mark as visited
lowercase : Union[str, Any] =1
# check for directions
if (
run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ )
or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ )
):
return True
lowercase : str =0
return False
return False
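# A hedged worked example (illustrative only): in this encoding 0 is an open
# cell and 1 is a wall, so for the maze [[0, 1], [0, 0]] the solver marks the
# path (0, 0) -> (1, 0) -> (1, 1) and prints [1, 0] / [1, 1] as the solution
# matrix.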
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int ) -> list[int]:
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
lowercase : List[Any] =[True] * (num + 1)
lowercase : Optional[int] =2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __magic_name__ ):
lowercase : Union[str, Any] =False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
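# A hedged worked example (illustrative only): for num = 10 the sieve crosses
# out 4, 6, 8, 10 (multiples of 2) and 9 (multiple of 3), leaving [2, 3, 5, 7].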
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 88 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Any =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
# Sample Gaussian noise to begin the denoising loop
if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ):
lowercase : Optional[int] =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be between 0 and 1
# do x_t -> x_t-1
lowercase : Dict =self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
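# A hedged usage sketch (illustrative; `unet` and `scheduler` are assumed to be
# pre-built components, and the class name reflects this obfuscated dump):
#
#   pipe = __SCREAMING_SNAKE_CASE(unet, scheduler)
#   images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images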
| 88 | 1 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE : # Public class to implement a graph
def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]] ):
'''simple docstring'''
lowercase : Dict =row
lowercase : Union[str, Any] =col
lowercase : int =graph
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]] ):
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]] ):
'''simple docstring'''
# Check all 8 cells surrounding the current cell
lowercase : Optional[Any] =[-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase : Optional[Any] =[-1, 0, 1, -1, 1, -1, 0, 1]
lowercase : Optional[int] =True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ): # And finally, count all islands.
'''simple docstring'''
lowercase : Tuple =[[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase : Optional[Any] =0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
count += 1
return count
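# A hedged worked example (illustrative only): in the 8-connected grid
# [[1, 0, 0], [0, 0, 1]] the two 1-cells share no edge or diagonal, so the
# count is 2; in [[1, 1, 0], [0, 0, 1]] the diagonal contact between (0, 1)
# and (1, 2) merges everything into a single island.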
| 88 |
'''simple docstring'''
import argparse
import copy
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
lowercase : int ={}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase : List[str] =[]
_list.append([line.split()[1], line.split()[2]] )
lowercase : Tuple =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
lowercase : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str:
with open(__magic_name__ ) as f:
lowercase : Optional[int] =f.read(1 )
lowercase : List[Any] =start_node
lowercase : List[Any] =[]
lowercase : str =start_node
lowercase : str =0
while visiting not in first_solution:
lowercase : Optional[int] =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
lowercase : List[Any] =k[1]
lowercase : str =k[0]
first_solution.append(__magic_name__ )
lowercase : Any =distance_of_first_solution + int(__magic_name__ )
lowercase : Optional[int] =best_node
first_solution.append(__magic_name__ )
lowercase : str =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase : str =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple:
lowercase : Tuple =[]
for n in solution[1:-1]:
lowercase : Dict =solution.index(__magic_name__ )
for kn in solution[1:-1]:
lowercase : Tuple =solution.index(__magic_name__ )
if n == kn:
continue
lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ )
lowercase : Optional[int] =kn
lowercase : List[Any] =n
lowercase : List[Any] =0
for k in _tmp[:-1]:
lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase : Optional[int] =distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]:
lowercase : str =1
lowercase : List[Any] =first_solution
lowercase : Any =[]
lowercase : str =distance_of_first_solution
lowercase : str =solution
while count <= iters:
lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ )
lowercase : Dict =0
lowercase : int =neighborhood[index_of_best_solution]
lowercase : Optional[int] =len(__magic_name__ ) - 1
lowercase : List[Any] =False
while not found:
lowercase : List[Any] =0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
lowercase : List[str] =best_solution[i]
lowercase : Dict =solution[i]
break
lowercase : Any =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase : str =True
lowercase : int =best_solution[:-1]
lowercase : Any =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase : Optional[int] =cost
lowercase : str =solution
else:
lowercase : Optional[int] =index_of_best_solution + 1
lowercase : List[Any] =neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
lowercase : Optional[int] =count + 1
return best_solution_ever, best_cost
def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple:
lowercase : List[str] =generate_neighbours(args.File )
lowercase , lowercase : Optional[Any] =generate_first_solution(
args.File , __magic_name__ )
lowercase , lowercase : int =tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 88 | 1 |
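For clarity, the swap neighborhood built above de-obfuscates to roughly the following standalone sketch (assumptions: the tour list starts and ends at the start node, and `dict_of_neighbours` maps each node to `(neighbor, distance)` pairs as produced by the omitted `generate_neighbours` helper; each candidate carries its tour length as its last element):
import copy

def find_neighborhood_sketch(solution, dict_of_neighbours):
    neighborhood = []
    for n in solution[1:-1]:
        for kn in solution[1:-1]:
            if n == kn:
                continue
            candidate = copy.deepcopy(solution)
            # swap the two interior nodes
            i, j = candidate.index(n), candidate.index(kn)
            candidate[i], candidate[j] = candidate[j], candidate[i]
            # recompute the tour length of the candidate
            distance = 0
            for k in candidate[:-1]:
                next_node = candidate[candidate.index(k) + 1]
                for neighbor, d in dict_of_neighbours[k]:
                    if neighbor == next_node:
                        distance += int(d)
            candidate.append(distance)  # tour length rides along as the last item
            if candidate not in neighborhood:
                neighborhood.append(candidate)
    neighborhood.sort(key=lambda c: c[-1])  # cheapest candidate first
    return neighborhood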
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowerCAmelCase ( __magic_name__ : dict ) -> tuple:
return (data["data"], data["target"])
def _lowerCAmelCase ( __magic_name__ : np.ndarray , __magic_name__ : np.ndarray , __magic_name__ : np.ndarray ) -> np.ndarray:
lowercase : Optional[int] =XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__magic_name__ , __magic_name__ )
# Predict target for test data
lowercase : List[str] =xgb.predict(__magic_name__ )
lowercase : List[str] =predictions.reshape(len(__magic_name__ ) , 1 )
return predictions
def _lowerCAmelCase ( ) -> None:
lowercase : str =fetch_california_housing()
lowercase , lowercase : str =data_handling(__magic_name__ )
lowercase , lowercase , lowercase , lowercase : Optional[int] =train_test_split(
__magic_name__ , __magic_name__ , test_size=0.2_5 , random_state=1 )
lowercase : Dict =xgboost(__magic_name__ , __magic_name__ , __magic_name__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(__magic_name__ , __magic_name__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(__magic_name__ , __magic_name__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 88 |
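A minimal end-to-end sketch of the same train/evaluate flow on synthetic data (self-contained; only the library defaults used above are assumed):
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 3))
y = x @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)
model = XGBRegressor(verbosity=0, random_state=42)
model.fit(x_train, y_train)
print(f"MAE: {mean_absolute_error(y_test, model.predict(x_test))}")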
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 1000000 ) -> int:
lowercase : Dict =set(range(3 , __magic_name__ , 2 ) )
primes.add(2 )
for p in range(3 , __magic_name__ , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , __magic_name__ , p ) ) )
    lowercase : List[Any] =[float(n ) for n in range(__magic_name__ + 1 )]
    for p in primes:
        for n in range(p , __magic_name__ + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 | 1 |
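The sieve above accumulates `phi[n] *= 1 - 1/p` over the prime factors of each `n`, so `sum(phi[2:])` counts the reduced proper fractions with denominator up to the limit (Project Euler 72). A brute-force cross-check for a small limit (standalone sketch):
from math import gcd

def phi_brute(n):
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

# For limit = 8 there are 21 reduced proper fractions: 1/2, 1/3, 2/3, 1/4, 3/4, ...
assert sum(phi_brute(n) for n in range(2, 9)) == 21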
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
lowerCamelCase_ = ['torch', 'scipy']
def __init__( self : List[Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def lowerCamelCase_ ( cls : Optional[int] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def lowerCamelCase_ ( cls : Any , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] )
| 88 |
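What the dummy pattern above achieves, as a standalone sketch (a simplified re-implementation, not the actual transformers internals): any use of the placeholder class fails fast with a readable message when the backends are missing.
import importlib.util

def requires_backends_sketch(obj, backends):
    # Raise early with a clear message instead of a deep ImportError later.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{obj} requires backends that are not installed: {missing}")

class ScipyTorchPlaceholder:  # hypothetical stand-in for the dummy class above
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(type(self).__name__, ["torch", "scipy"])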
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 88 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCamelCase_ = {"""facebook/blenderbot_small-90M""": 512}
def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
lowercase : Any =set()
lowercase : str =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase : Dict =char
lowercase : List[str] =set(__magic_name__ )
return pairs
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int="__start__" , UpperCAmelCase__ : Optional[Any]="__end__" , UpperCAmelCase__ : List[Any]="__unk__" , UpperCAmelCase__ : Optional[int]="__null__" , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , **UpperCAmelCase__ )
with open(UpperCAmelCase__ , encoding='''utf-8''' ) as vocab_handle:
lowercase : Tuple =json.load(UpperCAmelCase__ )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase__ , encoding='''utf-8''' ) as merges_handle:
lowercase : Optional[Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Union[str, Any] =[tuple(merge.split() ) for merge in merges]
lowercase : Tuple =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Dict ={}
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return len(self.encoder )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : Union[str, Any] =re.sub('''([.,!?()])''' , r''' \1''' , UpperCAmelCase__ )
lowercase : List[Any] =re.sub('''(\')''' , r''' \1 ''' , UpperCAmelCase__ )
lowercase : Optional[int] =re.sub(r'''\s{2,}''' , ''' ''' , UpperCAmelCase__ )
if "\n" in token:
lowercase : Tuple =token.replace('''\n''' , ''' __newln__''' )
lowercase : Tuple =token.split(''' ''' )
lowercase : List[Any] =[]
for token in tokens:
if not len(UpperCAmelCase__ ):
continue
lowercase : Optional[Any] =token.lower()
lowercase : List[Any] =tuple(UpperCAmelCase__ )
lowercase : Any =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase : List[str] =get_pairs(UpperCAmelCase__ )
if not pairs:
words.append(UpperCAmelCase__ )
continue
while True:
lowercase : Optional[int] =min(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : self.bpe_ranks.get(UpperCAmelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Dict =bigram
lowercase : Union[str, Any] =[]
lowercase : Tuple =0
while i < len(UpperCAmelCase__ ):
try:
lowercase : Any =word.index(UpperCAmelCase__ , UpperCAmelCase__ )
new_word.extend(word[i:j] )
lowercase : Any =j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(UpperCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : Tuple =tuple(UpperCAmelCase__ )
lowercase : List[str] =new_word
if len(UpperCAmelCase__ ) == 1:
break
else:
lowercase : int =get_pairs(UpperCAmelCase__ )
lowercase : Tuple ='''@@ '''.join(UpperCAmelCase__ )
lowercase : List[Any] =word[:-4]
lowercase : Optional[int] =word
words.append(UpperCAmelCase__ )
return " ".join(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Union[str, Any] =[]
lowercase : Union[str, Any] =re.findall(r'''\S+\n?''' , UpperCAmelCase__ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase__ ).split(''' ''' ) ) )
return split_tokens
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : List[Any] =token.lower()
return self.encoder.get(UpperCAmelCase__ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int ):
'''simple docstring'''
return self.decoder.get(UpperCAmelCase__ , self.unk_token )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Dict =''' '''.join(UpperCAmelCase__ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Tuple =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : str =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) + '''\n''' )
lowercase : Optional[int] =0
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
      for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase__ : UpperCAmelCase__[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowercase : List[Any] =token_index
writer.write(''' '''.join(UpperCAmelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
| 88 |
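The core of the `bpe` method above is the classic greedy merge loop: repeatedly find the lowest-ranked adjacent pair and fuse it. A toy standalone trace (the ranks here are made up, not the blenderbot_small merges):
def get_pairs_sketch(word):
    return {(a, b) for a, b in zip(word, word[1:])}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
word = ("l", "o", "w", "e", "r</w>")
while True:
    pairs = get_pairs_sketch(word)
    if not pairs:
        break
    bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
    if bigram not in bpe_ranks:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)  # fuse the best-ranked pair
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(word)  # ('low', 'er</w>')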
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : List[str] =True
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ )
lowercase : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Tuple , UpperCAmelCase__ : List[Any]=64 , UpperCAmelCase__ : str=48000 , UpperCAmelCase__ : List[Any]=480 , UpperCAmelCase__ : List[str]=10 , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 14000 , UpperCAmelCase__ : int = None , UpperCAmelCase__ : str = "fusion" , UpperCAmelCase__ : str = "repeatpad" , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowercase : Tuple =top_db
lowercase : List[str] =truncation
lowercase : int =padding
lowercase : Tuple =fft_window_size
lowercase : Union[str, Any] =(fft_window_size >> 1) + 1
lowercase : List[Any] =hop_length
lowercase : Dict =max_length_s
lowercase : int =max_length_s * sampling_rate
lowercase : Union[str, Any] =sampling_rate
lowercase : int =frequency_min
lowercase : List[Any] =frequency_max
lowercase : Dict =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase__ , min_frequency=UpperCAmelCase__ , max_frequency=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , norm=UpperCAmelCase__ , mel_scale='''htk''' , )
lowercase : Optional[int] =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase__ , min_frequency=UpperCAmelCase__ , max_frequency=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] =copy.deepcopy(self.__dict__ )
lowercase : List[str] =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : np.array , UpperCAmelCase__ : Optional[np.array] = None ):
'''simple docstring'''
lowercase : int =spectrogram(
UpperCAmelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCAmelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Any =np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase : Optional[Any] =[0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase : List[Any] =[0]
# randomly choose index for each part
lowercase : Any =np.random.choice(ranges[0] )
lowercase : Any =np.random.choice(ranges[1] )
lowercase : str =np.random.choice(ranges[2] )
lowercase : List[Any] =mel[idx_front : idx_front + chunk_frames, :]
lowercase : Any =mel[idx_middle : idx_middle + chunk_frames, :]
lowercase : List[Any] =mel[idx_back : idx_back + chunk_frames, :]
lowercase : List[str] =torch.tensor(mel[None, None, :] )
lowercase : Tuple =torch.nn.functional.interpolate(
UpperCAmelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCAmelCase__ )
lowercase : Union[str, Any] =mel_shrink[0][0].numpy()
lowercase : Dict =np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : np.array , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase : Any =True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase : int =len(UpperCAmelCase__ ) - max_length
lowercase : Any =np.random.randint(0 , overflow + 1 )
lowercase : int =waveform[idx : idx + max_length]
lowercase : List[str] =self._np_extract_fbank_features(UpperCAmelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase : Optional[Any] =self._np_extract_fbank_features(UpperCAmelCase__ , self.mel_filters )
lowercase : Tuple =max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase : str =mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase : List[str] =np.stack([mel, mel, mel, mel] , axis=0 )
lowercase : List[Any] =False
else:
lowercase : Optional[int] =self._random_mel_fusion(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Dict =True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
lowercase : Optional[int] =False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase : Any =int(max_length / len(UpperCAmelCase__ ) )
lowercase : List[Any] =np.stack(np.tile(UpperCAmelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase : str =int(max_length / len(UpperCAmelCase__ ) )
lowercase : Any =np.stack(np.tile(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Union[str, Any] =np.pad(UpperCAmelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
lowercase : List[Any] =self._np_extract_fbank_features(UpperCAmelCase__ , self.mel_filters )
lowercase : Any =np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowercase : Optional[int] =self._np_extract_fbank_features(UpperCAmelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : str , ):
'''simple docstring'''
lowercase : Union[str, Any] =truncation if truncation is not None else self.truncation
lowercase : str =padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Dict =isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase : Union[str, Any] =is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : Union[str, Any] =[np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
lowercase : Optional[int] =np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : int =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : Dict =[np.asarray(UpperCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase : List[str] =[
self._get_input_mel(UpperCAmelCase__ , max_length if max_length else self.nb_max_samples , UpperCAmelCase__ , UpperCAmelCase__ )
for waveform in raw_speech
]
lowercase : int =[]
lowercase : List[Any] =[]
for mel, longer in padded_inputs:
input_mel.append(UpperCAmelCase__ )
is_longer.append(UpperCAmelCase__ )
if truncation == "fusion" and sum(UpperCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase : int =np.random.randint(0 , len(UpperCAmelCase__ ) )
lowercase : Tuple =True
if isinstance(input_mel[0] , UpperCAmelCase__ ):
lowercase : List[str] =[np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase : List[Any] =[[longer] for longer in is_longer]
lowercase : Dict ={'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase : List[str] =BatchFeature(UpperCAmelCase__ )
if return_tensors is not None:
lowercase : Any =input_features.convert_to_tensors(UpperCAmelCase__ )
return input_features
| 88 |
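Usage sketch for the extractor above (assuming the public class name `ClapFeatureExtractor` from transformers and its 48 kHz defaults; the printed shape is indicative, not guaranteed):
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()
audio = np.random.randn(3 * 48_000).astype(np.float32)  # 3 s of mono noise
features = extractor(audio, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # e.g. (1, 4, 1001, 64) with the default "fusion" settings
print(features["is_longer"])             # [[False]]: 3 s is shorter than max_length_s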
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
      2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
#
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
'''simple docstring'''
    lowercase : Any ='''A = ''' + ''' + '''.join(
      F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
    lowercase : Tuple ='''B = ''' + ''' + '''.join(
      F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
    lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
      F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
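A worked example for the multiplier above, assuming the class is named `FFT` as in the un-obfuscated source: (1 + 2x + 3x^2)(4 + 5x) = 4 + 13x + 22x^2 + 15x^3.
poly = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
# product holds the coefficients of A*B, lowest degree first, as complex
# numbers rounded to 8 decimals: [(4+0j), (13+0j), (22+0j), (15+0j)]
print(poly.product)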
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
UpperCamelCase_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
UpperCamelCase_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
UpperCamelCase_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
UpperCamelCase_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModel)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
lowerCamelCase_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 88 |
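Usage sketch of the lazy mappings above: a single auto entry point resolves the concrete Flax class from a checkpoint's config (this downloads a checkpoint, so network access is assumed):
from transformers import FlaxAutoModelForSequenceClassification

model = FlaxAutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", from_pt=True
)
print(type(model).__name__)  # FlaxDistilBertForSequenceClassification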
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 | 1 |
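A simplified standalone sketch of the lazy-module idea used above (not the actual `_LazyModule` implementation): importing the package stays cheap because submodules load only when one of their symbols is first accessed.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)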
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = XGLMConfig
lowerCamelCase_ = {}
lowerCamelCase_ = 'gelu'
def __init__( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Any=14 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Dict=0.02 , ):
'''simple docstring'''
lowercase : str =parent
lowercase : int =batch_size
lowercase : Any =seq_length
lowercase : Dict =is_training
lowercase : Union[str, Any] =use_input_mask
lowercase : Optional[Any] =use_labels
lowercase : List[str] =vocab_size
lowercase : str =d_model
lowercase : Union[str, Any] =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Optional[int] =ffn_dim
lowercase : int =activation_function
lowercase : int =activation_dropout
lowercase : str =attention_dropout
lowercase : Tuple =max_position_embeddings
lowercase : List[str] =initializer_range
lowercase : Optional[int] =None
lowercase : Dict =0
lowercase : Any =2
lowercase : Tuple =1
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : int =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str =self.get_config()
lowercase : List[str] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCAmelCase__ , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Any =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase : Optional[Any] =config_and_inputs
lowercase : str ={
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase_ = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase_ = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Tuple =TFXGLMModelTester(self )
lowercase : Any =ConfigTester(self , config_class=UpperCAmelCase__ , n_embd=37 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Dict =TFXGLMModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Union[str, Any]=True ):
'''simple docstring'''
lowercase : Dict =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
lowercase : Any =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowercase : Tuple =[2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
lowercase : Any =model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : int =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowercase : List[Any] =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
lowercase : str =tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
lowercase : Optional[int] =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
lowercase : Optional[Any] =model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ , seed=[7, 0] )
lowercase : Optional[Any] =tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__ )
lowercase : Tuple =(
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Any =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
lowercase : List[str] =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowercase : Optional[Any] ='''left'''
# use different length sentences to test batching
lowercase : Tuple =[
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
lowercase : Optional[Any] =tokenizer(UpperCAmelCase__ , return_tensors='''tf''' , padding=UpperCAmelCase__ )
lowercase : str =inputs['''input_ids''']
lowercase : Dict =model.generate(input_ids=UpperCAmelCase__ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
lowercase : Tuple =tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
lowercase : List[Any] =model.generate(input_ids=UpperCAmelCase__ , max_new_tokens=12 )
lowercase : str =tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
lowercase : Dict =model.generate(input_ids=UpperCAmelCase__ , max_new_tokens=12 )
lowercase : Union[str, Any] =tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase : int =tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ )
lowercase : int =[
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] )
| 88 |
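The batching test above depends on `padding_side = "left"`: a decoder-only model generates from the last position, so pad tokens must sit before the prompt rather than after it. A quick way to see the mask layout (downloads the tokenizer, so network access and sentencepiece are assumed):
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"
batch = tokenizer(["Hello, my dog is a little", "Hi"], padding=True, return_tensors="np")
print(batch["attention_mask"][1])  # zeros on the left, ones over the real tokens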
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'vision-encoder-decoder'
lowerCamelCase_ = True
def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
        F'''A configuration of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
lowercase : Optional[Any] =kwargs.pop('''encoder''' )
lowercase : List[Any] =encoder_config.pop('''model_type''' )
lowercase : List[str] =kwargs.pop('''decoder''' )
lowercase : Dict =decoder_config.pop('''model_type''' )
lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : str =True
@classmethod
def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowercase : int =True
lowercase : Optional[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =copy.deepcopy(self.__dict__ )
lowercase : Union[str, Any] =self.encoder.to_dict()
lowercase : Union[str, Any] =self.decoder.to_dict()
lowercase : int =self.__class__.model_type
return output
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =OrderedDict()
lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
lowercase : Optional[Any] =OrderedDict()
lowercase : List[Any] =super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
lowercase , lowercase : Optional[int] =dummy_input['''input_ids'''].shape
lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase : List[str] =dummy_input.pop('''input_ids''' )
lowercase : Tuple =dummy_input.pop('''attention_mask''' )
lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ )
return common_inputs
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ):
'''simple docstring'''
lowercase : List[Any] =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
| 88 | 1 |
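Composition sketch for the config above, mirroring the `from_encoder_decoder_configs` classmethod (model names are illustrative; downloading the configs assumes network access):
from transformers import AutoConfig, VisionEncoderDecoderConfig

encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
decoder_config = AutoConfig.from_pretrained("gpt2")
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True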
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
def __init__( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=99 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Optional[int]=None , ):
'''simple docstring'''
lowercase : Optional[int] =parent
lowercase : Tuple =batch_size
lowercase : Optional[int] =seq_length
lowercase : Optional[Any] =is_training
lowercase : Any =use_input_mask
lowercase : Any =use_token_type_ids
lowercase : Dict =use_labels
lowercase : Any =vocab_size
lowercase : int =hidden_size
lowercase : List[Any] =num_hidden_layers
lowercase : str =num_attention_heads
lowercase : Optional[int] =intermediate_multiple_size
lowercase : str =hidden_act
lowercase : List[Any] =hidden_dropout
lowercase : Tuple =attention_dropout
lowercase : Optional[int] =weight_tying
lowercase : List[Any] =max_position_embeddings
lowercase : Union[str, Any] =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : List[str] =initializer_range
lowercase : List[Any] =num_labels
lowercase : Union[str, Any] =num_choices
lowercase : Union[str, Any] =scope
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple =None
if self.use_input_mask:
lowercase : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_labels:
lowercase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple =self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase , lowercase , lowercase : Optional[int] =self.prepare_config_and_inputs()
lowercase : Dict =True
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : int =GPTNeoXJapaneseModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowercase : Optional[Any] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : str =True
lowercase : Dict =GPTNeoXJapaneseModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : int =GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Union[str, Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : int =True
lowercase : Optional[Any] =GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowercase : Tuple =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowercase : Dict =outputs.past_key_values
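# the cached pass over only the new tokens below must yield the same hidden states as a full-sequence pass without cache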
# create hypothetical multiple next tokens and extend to next_input_ids
lowercase : Any =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase : Optional[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
lowercase : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase : Union[str, Any] =torch.cat([input_mask, next_mask] , dim=-1 )
lowercase : List[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
lowercase : Any =output_from_no_past['''hidden_states'''][0]
lowercase : Optional[int] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
lowercase : Tuple =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase : List[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase : int =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Union[str, Any] =config_and_inputs
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCamelCase_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCamelCase_ = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.model_tester =GPTNeoXJapaneseModelTester(self )
self.config_tester =ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase , lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase , lowercase , lowercase , lowercase : Dict =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
lowercase , lowercase , lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase : Any =None
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase , lowercase , lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any ='''abeja/gpt-neox-japanese-2.7b'''
lowercase : Optional[int] =['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
lowercase : Any =[
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
lowercase : Optional[int] =GPTNeoXJapaneseTokenizer.from_pretrained(UpperCAmelCase__ )
lowercase : List[Any] =GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCAmelCase__ )
lowercase : List[str] =[]
for prompt in prompts:
lowercase : Optional[Any] =tokenizer(UpperCAmelCase__ , return_tensors='''pt''' ).input_ids
lowercase : List[Any] =model.generate(UpperCAmelCase__ , max_length=50 )
lowercase : List[str] =tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 88 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args() -> argparse.Namespace:
parser =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=str , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=str , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=int , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=str , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=str , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=str , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=str , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=int , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=str , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=int , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=float , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=float , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=int , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=float , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=str , required=True , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=str , help='''Model ID to upload to on the Hugging Face Hub.''' )
args =parser.parse_args()
return args
def initialize_tpu(args : argparse.Namespace ):
try:
if args.tpu_name:
tpu =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
tpu =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(tpu )
tf.tpu.experimental.initialize_tpu_system(tpu )
return tpu
def count_samples(file_list ) -> int:
num_samples =0
for file in file_list:
filename =file.split('''/''' )[-1]
sample_count =re.search(R'''-\d+-(\d+)\.tfrecord''' , filename ).group(1 )
sample_count =int(sample_count )
num_samples += sample_count
return num_samples
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
num_samples =count_samples(records )
dataset =tf.data.Dataset.from_tensor_slices(records )
if shuffle:
dataset =dataset.shuffle(len(dataset ) )
dataset =tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
dataset =dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
dataset =dataset.map(decode_fn , num_parallel_calls=AUTO )
if shuffle:
assert shuffle_buffer_size is not None
dataset =dataset.shuffle(shuffle_buffer_size )
dataset =dataset.batch(batch_size , drop_remainder=True )
dataset =dataset.map(mask_fn , num_parallel_calls=AUTO )
dataset =dataset.prefetch(AUTO )
return dataset
def main(args ):
if not args.no_tpu:
tpu =initialize_tpu(args )
strategy =tf.distribute.TPUStrategy(tpu )
else:
strategy =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
tokenizer =AutoTokenizer.from_pretrained(args.tokenizer )
config =AutoConfig.from_pretrained(args.pretrained_model_config )
config.vocab_size =tokenizer.vocab_size
training_records =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
eval_records =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
num_train_samples =count_samples(training_records )
steps_per_epoch =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
total_train_steps =steps_per_epoch * args.num_epochs
with strategy.scope():
model =TFAutoModelForMaskedLM.from_config(config )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
optimizer , schedule =create_optimizer(
num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
def decode_fn(example ):
features ={
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(example , features )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
data_collator =DataCollatorForLanguageModeling(
tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
def mask_with_collator(batch ):
# TF really needs an isin() function
special_tokens_mask =(
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
batch['''input_ids'''] , batch['''labels'''] =data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
return batch
batch_size =args.per_replica_batch_size * strategy.num_replicas_in_sync
train_dataset =prepare_dataset(
training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
eval_dataset =prepare_dataset(
eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
callbacks =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
model.fit(
train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
args = parse_args()
main(args)
| 88 | 1 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Dict , *UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Dict =eval_examples
lowercase : Union[str, Any] =post_process_function
lowercase : Optional[Any] =quant_trainer_args
lowercase : Tuple =128 # default number of calibration samples
def lowerCamelCase_ ( self : Dict , calib_dataset : str=None ):
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
lowercase : Dict =calib_dataset if calib_dataset is not None else self.calib_dataset
lowercase : str =self._remove_unused_columns(UpperCAmelCase__ , description='''Calibration''' )
return DataLoader(
UpperCAmelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Dict , calib_dataset : Optional[Any]=None ):
'''simple docstring'''
lowercase : int =self.train_dataset if calib_dataset is None else calib_dataset
lowercase : Any =self.get_calib_dataloader(UpperCAmelCase__ )
lowercase : Optional[int] =self.model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args , calib=UpperCAmelCase__ )
model.eval()
quant_trainer.enable_calibration(UpperCAmelCase__ )
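# with calibration enabled, the prediction steps below only collect activation statistics for the quantizers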
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(UpperCAmelCase__ ):
# Prediction step
lowercase , lowercase , lowercase : Any =self.prediction_step(UpperCAmelCase__ , UpperCAmelCase__ , prediction_loss_only=UpperCAmelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCAmelCase__ , self.quant_trainer_args )
lowercase : Union[str, Any] =model
def lowerCamelCase_ ( self : str , eval_dataset : List[Any]=None , eval_examples : List[Any]=None , ignore_keys : List[str]=None , metric_key_prefix : str = "eval" ):
'''simple docstring'''
lowercase : str =self.eval_dataset if eval_dataset is None else eval_dataset
lowercase : List[Any] =self.get_eval_dataloader(UpperCAmelCase__ )
lowercase : Optional[int] =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase : Optional[int] =self.compute_metrics
lowercase : Dict =None
lowercase : int =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase : List[str] =eval_loop(
UpperCAmelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
lowercase : Tuple =compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
lowercase : Tuple =self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
lowercase : Optional[int] =self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase : Optional[int] =metrics.pop(UpperCAmelCase__ )
self.log(UpperCAmelCase__ )
else:
lowercase : str ={}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase : Any =self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
return metrics
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : str = "test" ):
'''simple docstring'''
lowercase : Any =self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase : List[str] =self.compute_metrics
lowercase : List[str] =None
lowercase : Union[str, Any] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase : List[Any] =eval_loop(
UpperCAmelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
lowercase : Tuple =compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase : List[str] =self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , '''predict''' )
lowercase : Any =self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase : str =metrics.pop(UpperCAmelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int]="./" ):
'''simple docstring'''
lowercase : str =self.eval_dataset
lowercase : List[str] =self.get_eval_dataloader(UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )
# saving device - to make it consistent
lowercase : Optional[Any] =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
lowercase : Any =tuple(v.to(UpperCAmelCase__ ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
TensorQuantizer.use_fb_fake_quant =True
lowercase : Optional[int] =self.model.to(UpperCAmelCase__ )
model.eval()
model.float()
lowercase : Optional[int] =model.module if hasattr(UpperCAmelCase__ , '''module''' ) else model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args )
lowercase : Dict =os.path.join(UpperCAmelCase__ , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
lowercase : Any ={0: '''batch_size''', 1: '''seq_len'''}
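# mark batch and sequence dimensions as dynamic so the exported ONNX graph accepts variable input shapes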
torch.onnx.export(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , export_params=UpperCAmelCase__ , opset_version=13 , do_constant_folding=UpperCAmelCase__ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=UpperCAmelCase__ , )
logger.info('''onnx export finished''' )
| 88 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : str=30 , UpperCAmelCase__ : Dict=400 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Optional[int]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
lowercase : Union[str, Any] =size if size is not None else {'''height''': 18, '''width''': 18}
lowercase : Optional[Any] =parent
lowercase : int =batch_size
lowercase : Union[str, Any] =num_channels
lowercase : List[str] =image_size
lowercase : Optional[int] =min_resolution
lowercase : str =max_resolution
lowercase : Union[str, Any] =do_resize
lowercase : Optional[int] =size
lowercase : Tuple =do_normalize
lowercase : Optional[int] =image_mean
lowercase : Union[str, Any] =image_std
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.image_proc_tester =EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
# Initialize image_processor
lowercase : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : List[str] =prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
lowercase : int =image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowercase : int =image_processor(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
# Initialize image_processor
lowercase : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : List[Any] =prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowercase : str =image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowercase : Dict =image_processor(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
# Initialize image_processor
lowercase : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : Optional[int] =prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase : int =image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowercase : Tuple =image_processor(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 88 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict , vocab_file : Optional[int] , bos_token : List[Any]="<s>" , eos_token : int="</s>" , sep_token : Optional[Any]="</s>" , cls_token : Optional[Any]="<s>" , unk_token : Any="<unk>" , pad_token : Any="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Optional[int] , ):
'''simple docstring'''
self.sp_model_kwargs ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
self.num_madeup_words =7
madeup_words =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
kwargs['''additional_special_tokens'''] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(vocab_file ) )
self.vocab_file =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset =1
# Mimic fairseq token-to-id alignment for the first 4 tokens
self.fairseq_tokens_to_ids ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
sp_size =len(self.sp_model )
madeup_words ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(madeup_words )
self.fairseq_ids_to_tokens ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
state =self.__dict__.copy()
state['''sp_model'''] =None
state['''sp_model_proto'''] =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
self.__dict__ =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
self.sp_model_kwargs ={}
self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : List[Any] =[self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : int =[self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 88 | 1 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'owlvit_text_model'
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=49408 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : Dict=2048 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : Dict=8 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : List[Any]="quick_gelu" , UpperCAmelCase__ : Union[str, Any]=1E-5 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : Union[str, Any]=1.0 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=49406 , UpperCAmelCase__ : str=49407 , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Tuple =vocab_size
lowercase : Optional[Any] =hidden_size
lowercase : Dict =intermediate_size
lowercase : Optional[Any] =num_hidden_layers
lowercase : Union[str, Any] =num_attention_heads
lowercase : Tuple =max_position_embeddings
lowercase : Optional[int] =hidden_act
lowercase : Tuple =layer_norm_eps
lowercase : List[Any] =attention_dropout
lowercase : Dict =initializer_range
lowercase : Tuple =initializer_factor
@classmethod
def lowerCamelCase_ ( cls : Optional[int] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__ )
lowercase , lowercase : List[Any] =cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
lowercase : Optional[Any] =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'owlvit_vision_model'
def __init__( self : Tuple , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Tuple=3072 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : int="quick_gelu" , UpperCAmelCase__ : str=1E-5 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : Dict=1.0 , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Dict =hidden_size
lowercase : List[Any] =intermediate_size
lowercase : Optional[int] =num_hidden_layers
lowercase : Tuple =num_attention_heads
lowercase : str =num_channels
lowercase : Tuple =image_size
lowercase : Optional[Any] =patch_size
lowercase : List[str] =hidden_act
lowercase : Any =layer_norm_eps
lowercase : List[str] =attention_dropout
lowercase : Union[str, Any] =initializer_range
lowercase : List[str] =initializer_factor
@classmethod
def lowerCamelCase_ ( cls : Tuple , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__ )
lowercase , lowercase : str =cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
lowercase : Optional[Any] =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'owlvit'
lowerCamelCase_ = True
def __init__( self : str , text_config : str=None , vision_config : Any=None , projection_dim : Union[str, Any]=512 , logit_scale_init_value : List[Any]=2.65_92 , return_dict : Tuple=True , **kwargs : Optional[int] , ):
'''simple docstring'''
super().__init__(**kwargs )
if text_config is None:
text_config ={}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
vision_config ={}
logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
self.text_config =OwlViTTextConfig(**text_config )
self.vision_config =OwlViTVisionConfig(**vision_config )
self.projection_dim =projection_dim
self.logit_scale_init_value =logit_scale_init_value
self.return_dict =return_dict
self.initializer_factor =1.0
@classmethod
def lowerCamelCase_ ( cls : Tuple , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__ )
lowercase , lowercase : int =cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def lowerCamelCase_ ( cls : str , text_config : Dict , vision_config : Dict , **kwargs : Tuple ):
'''simple docstring'''
config_dict ={}
config_dict['''text_config'''] =text_config
config_dict['''vision_config'''] =vision_config
return cls.from_dict(config_dict , **kwargs )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
output =copy.deepcopy(self.__dict__ )
output['''text_config'''] =self.text_config.to_dict()
output['''vision_config'''] =self.vision_config.to_dict()
output['''model_type'''] =self.__class__.model_type
return output
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
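# absolute tolerance used when validating the ONNX export against the original model's outputs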
return 1E-4
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : "ProcessorMixin" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
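# build dummy text inputs with the tokenizer and dummy pixel values with the image processor, then merge the two dicts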
lowercase : Dict =super().generate_dummy_inputs(
processor.tokenizer , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , framework=UpperCAmelCase__ )
lowercase : List[str] =super().generate_dummy_inputs(
processor.image_processor , batch_size=UpperCAmelCase__ , framework=UpperCAmelCase__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
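# default ONNX opset version to use when exporting this model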
return 14
| 88 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ) -> None:
parameter_file =os.path.join(args.tf_model_dir , '''parameters.json''' )
params =json.loads(open(parameter_file ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
args.output =args.output + '''.pt'''
state_dict =OrderedDict()
with tf.device('''/CPU:0''' ):
reader =tf.train.load_checkpoint(args.tf_model_dir )
shapes =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
vnp =reader.get_tensor(key_name ).astype(np.float32 )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase : int =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase : Union[str, Any] =8
lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/moe''' ):
lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase : Union[str, Any] =key_name[-9:-7]
for i in range(16 ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase : Any =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/mlp''' ):
lowercase : Dict =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Any =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p1/bias''' ):
lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/kernel''' ):
lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/bias''' ):
lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/ln''' ):
lowercase : int =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/att''' ):
lowercase : int =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Dict =state[:, 0, :, :]
lowercase : Tuple =state[:, 1, :, :]
lowercase : List[Any] =state[:, 2, :, :]
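# the three slices above are the query, key and value weights of the fused attention kernel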
lowercase : Optional[int] =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase : Dict =torch.tensor(__magic_name__ )
lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase : Tuple =torch.tensor(__magic_name__ )
elif key_name.endswith('''/o/kernel''' ):
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase : List[Any] =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : str =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/an''' ):
lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase : Optional[Any] ='''model.%s.weight''' % nlayer
lowercase : Optional[int] =vnp.copy() # same in embedded
lowercase : List[Any] =torch.tensor(__magic_name__ )
if key_name.startswith('''model/wte''' ):
lowercase : Tuple ='''lm_head.weight'''
lowercase : str =vnp.copy() # same in embedded
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/wob''' ):
lowercase : List[str] ='''final_logits_bias'''
lowercase : Dict =vnp.copy() # same in embedded
lowercase : Tuple =state.reshape((1, -1) )
lowercase : Dict =torch.tensor(__magic_name__ )
elif key_name == "model/dense/kernel":
lowercase : Dict ='''model.last_project.weight'''
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name == "model/dense_1/bias":
lowercase : List[Any] ='''model.last_project.bias'''
lowercase : str =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
torch.save(__magic_name__ , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 88 | 1 |
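A minimal NumPy/PyTorch sketch of the qkv split performed in the conversion row above, assuming the fused TF kernel has shape (d_model, 3, n_heads, head_dim); the dimensions and names here are illustrative, not taken from an actual checkpoint.
import numpy as np
import torch
d_model, n_heads, head_dim = 6, 2, 4
vnp = np.random.rand(d_model, 3, n_heads, head_dim).astype(np.float32)  # stand-in for the TF kernel
weights = {}
for idx, proj in enumerate(["q_proj", "k_proj", "v_proj"]):
    state = vnp[:, idx, :, :]                                    # (d_model, n_heads, head_dim)
    state = state.reshape(d_model, n_heads * head_dim)           # merge the head axes
    weights[proj] = torch.tensor(state.transpose(1, 0).copy())   # (n_heads * head_dim, d_model)
assert weights["q_proj"].shape == (n_heads * head_dim, d_model)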
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase_ = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
UpperCamelCase_ = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
UpperCamelCase_ = """|""".join(sys.argv[1:])
UpperCamelCase_ = re.compile(rf'''^({joined_dirs}).*?\.py$''')
UpperCamelCase_ = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 88 |
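The filtering step of the script above in isolation, with made-up file paths: only .py files whose path starts with one of the requested top-level directories survive.
import re
modified_files = ["src/a.py", "docs/readme.md", "tests/test_x.py", "setup.py"]  # sample input
joined_dirs = "|".join(["utils", "src", "tests", "examples"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
print(" ".join(x for x in modified_files if regex.match(x)))  # src/a.py tests/test_x.py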
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = BigBirdTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Any =self.get_rust_tokenizer()
lowercase : int ='''I was born in 92000, and this is falsé.'''
lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ )
lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_rust_tokenizer()
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''Hello World!'''
lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : Dict =''' '''.join(UpperCAmelCase__ )
lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' )
lowercase : Dict =BigBirdModel(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 88 | 1 |
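A minimal sketch of the slow/fast parity check the test above performs; it assumes network access to fetch google/bigbird-roberta-base and that sentencepiece is installed.
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
slow = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
fast = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)
assert slow.encode(text, add_special_tokens=False) == fast.encode(text, add_special_tokens=False)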
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'ctrl'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Dict , UpperCAmelCase__ : int=246534 , UpperCAmelCase__ : Union[str, Any]=256 , UpperCAmelCase__ : Union[str, Any]=1280 , UpperCAmelCase__ : int=8192 , UpperCAmelCase__ : List[str]=48 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : int=1E-6 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Dict=True , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
lowercase : Dict =vocab_size
lowercase : str =n_positions
lowercase : List[Any] =n_embd
lowercase : int =n_layer
lowercase : Union[str, Any] =n_head
lowercase : str =dff
lowercase : Union[str, Any] =resid_pdrop
lowercase : Optional[int] =embd_pdrop
lowercase : List[Any] =layer_norm_epsilon
lowercase : Tuple =initializer_range
lowercase : List[Any] =use_cache
super().__init__(**UpperCAmelCase__ )
| 88 |
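A short sketch of what the attribute_map in the config above buys you: generic names resolve to the CTRL-specific attributes, so downstream code can stay model-agnostic. CTRLConfig is assumed to be importable from transformers.
from transformers import CTRLConfig
config = CTRLConfig(n_embd=1280, n_layer=48, n_head=16)
assert config.hidden_size == config.n_embd == 1280        # 'hidden_size' -> 'n_embd'
assert config.num_hidden_layers == config.n_layer == 48   # 'num_hidden_layers' -> 'n_layer'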
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] ) -> str:
lowercase : Optional[Any] =[0 for i in range(r + 1 )]
# nc0 = 1
lowercase : Optional[Any] =1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowercase : str =min(__magic_name__ , __magic_name__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 88 | 1 |
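A readable, runnable version of the row-wise DP above (identifier names are mine; the algorithm is Pascal's rule applied in place, right to left, so each c[j] still sees the previous row's c[j - 1]).
def binomial_coefficient(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # walk right-to-left so the previous row isn't overwritten too early
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))  # 252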
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = None
# Automatically constructed
lowerCamelCase_ = "dict"
lowerCamelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCamelCase_ = field(default='Audio' , init=lowercase__ , repr=lowercase__ )
def __call__( self : Tuple ):
'''simple docstring'''
return self.pa_type
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, bytes, dict] ):
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return {"bytes": None, "path": value}
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowercase : Optional[int] =BytesIO()
sf.write(UpperCAmelCase__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# To convert "PCM-byte" to "WAV-byte", you at least have to know the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If we already have the PCM bytes, we don't need to read the file again (just use them!)
lowercase : Optional[int] =np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
lowercase : Union[str, Any] =np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 32767
lowercase : Tuple =BytesIO(bytes() )
sf.write(UpperCAmelCase__ , UpperCAmelCase__ , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : dict , UpperCAmelCase__ : Optional[Dict[str, Union[str, bool, None]]] = None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
lowercase , lowercase : str =(value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
lowercase : Union[str, Any] =xsplitext(UpperCAmelCase__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
lowercase : List[str] =token_per_repo_id or {}
lowercase : Dict =path.split('''::''' )[-1]
try:
lowercase : Optional[Any] =string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL )['''repo_id''']
lowercase : Optional[int] =token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowercase : List[Any] =None
with xopen(UpperCAmelCase__ , '''rb''' , use_auth_token=UpperCAmelCase__ ) as f:
lowercase , lowercase : Any =sf.read(UpperCAmelCase__ )
else:
lowercase , lowercase : Optional[Any] =sf.read(UpperCAmelCase__ )
lowercase : int =array.T
if self.mono:
lowercase : Tuple =librosa.to_mono(UpperCAmelCase__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowercase : Union[str, Any] =librosa.resample(UpperCAmelCase__ , orig_sr=UpperCAmelCase__ , target_sr=self.sampling_rate )
lowercase : str =self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Union[pa.StringArray, pa.StructArray] ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
lowercase : List[Any] =pa.array([None] * len(UpperCAmelCase__ ) , type=pa.binary() )
lowercase : Tuple =pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase : List[Any] =pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() )
lowercase : List[str] =pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
lowercase : int =pa.array([Audio().encode_example(UpperCAmelCase__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
lowercase : Any =storage.field('''bytes''' )
else:
lowercase : Optional[int] =pa.array([None] * len(UpperCAmelCase__ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
lowercase : Optional[Any] =storage.field('''path''' )
else:
lowercase : Dict =pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() )
lowercase : Optional[Any] =pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(UpperCAmelCase__ , self.pa_type )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : pa.StructArray ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase__ : Tuple ):
with xopen(UpperCAmelCase__ , '''rb''' ) as f:
lowercase : str =f.read()
return bytes_
lowercase : List[Any] =pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase : Optional[Any] =pa.array(
[os.path.basename(UpperCAmelCase__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
lowercase : List[str] =pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase__ , self.pa_type )
| 88 |
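A minimal sketch of the PCM branch in encode_example above: raw 16-bit PCM is reinterpreted as int16 and scaled by the int16 maximum (32767) into floats in [-1, 1]; the sample bytes here are fabricated.
import numpy as np
pcm_bytes = np.array([0, 16384, -16384, 32767], dtype=np.int16).tobytes()
array = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
print(array)  # [ 0.  0.5000153 -0.5000153  1. ]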
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> bool:
lowercase : Optional[int] =first_str.lower().strip()
lowercase : Union[str, Any] =second_str.lower().strip()
# Remove whitespace
lowercase : Optional[int] =first_str.replace(''' ''' , '''''' )
lowercase : Optional[Any] =second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__magic_name__ ) != len(__magic_name__ ):
return False
# Default values for count should be 0
lowercase : defaultdict[str, int] =defaultdict(__magic_name__ )
# For each character in the input strings, increment the count
# in the corresponding dictionary entry
for i in range(len(__magic_name__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 88 | 1 |
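A self-contained version of the counting check above with readable names (the dump's __main__ block refers to it as check_anagrams), plus two quick assertions.
from collections import defaultdict
def check_anagrams(first: str, second: str) -> bool:
    a = first.lower().replace(" ", "")
    b = second.lower().replace(" ", "")
    if len(a) != len(b):
        return False
    count: defaultdict[str, int] = defaultdict(int)
    for x, y in zip(a, b):
        count[x] += 1  # characters of the first string add
        count[y] -= 1  # characters of the second string subtract
    return all(v == 0 for v in count.values())
assert check_anagrams("Silent", "Listen")
assert not check_anagrams("There", "Their")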
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = Dict[str, Any]
UpperCamelCase_ = List[Prediction]
@add_end_docstrings(lowercase__ )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Any , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowerCamelCase_ ( self : Dict , **UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Dict ={}
if "threshold" in kwargs:
lowercase : str =kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self : Tuple , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =load_image(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.IntTensor([[image.height, image.width]] )
lowercase : int =self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
lowercase : int =self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
lowercase : str =target_size
return inputs
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Any =model_inputs.pop('''target_size''' )
lowercase : Union[str, Any] =self.model(**UpperCAmelCase__ )
lowercase : Any =outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
lowercase : List[str] =model_inputs['''bbox''']
return model_outputs
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=0.9 ):
'''simple docstring'''
lowercase : Dict =model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
lowercase , lowercase : Union[str, Any] =target_size[0].tolist()
def unnormalize(UpperCAmelCase__ : Union[str, Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
lowercase , lowercase : List[Any] =model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
lowercase : Optional[int] =[self.model.config.idalabel[prediction] for prediction in classes.tolist()]
lowercase : Dict =[unnormalize(UpperCAmelCase__ ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
lowercase : List[str] =['''score''', '''label''', '''box''']
lowercase : Any =[dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for vals in zip(scores.tolist() , UpperCAmelCase__ , UpperCAmelCase__ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
lowercase : Tuple =self.image_processor.post_process_object_detection(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =raw_annotations[0]
lowercase : Dict =raw_annotation['''scores''']
lowercase : Optional[Any] =raw_annotation['''labels''']
lowercase : List[str] =raw_annotation['''boxes''']
lowercase : List[Any] =scores.tolist()
lowercase : str =[self.model.config.idalabel[label.item()] for label in labels]
lowercase : List[str] =[self._get_bounding_box(UpperCAmelCase__ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
lowercase : str =['''score''', '''label''', '''box''']
lowercase : Optional[int] =[
dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
lowercase , lowercase , lowercase , lowercase : Union[str, Any] =box.int().tolist()
lowercase : Optional[Any] ={
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 88 |
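A sketch of the box unnormalization in the LayoutLM branch above: token boxes come back on a 0-1000 grid and are rescaled to pixel coordinates of the original image (the sample values below are made up).
def unnormalize_box(bbox, width, height):
    xmin, ymin, xmax, ymax = bbox
    return {
        "xmin": int(width * xmin / 1000),
        "ymin": int(height * ymin / 1000),
        "xmax": int(width * xmax / 1000),
        "ymax": int(height * ymax / 1000),
    }
print(unnormalize_box([100, 200, 300, 400], width=640, height=480))
# {'xmin': 64, 'ymin': 96, 'xmax': 192, 'ymax': 192}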
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase : Optional[int] =None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# This test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
# impose any sequence length constraint. The parent class's test would fail since it relies on
# the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 | 1 |
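A minimal sketch of the padding check in the test above (network access to fetch bigscience/tokenizer is assumed): once pad_token is unset, padded encoding should raise ValueError.
from transformers import BloomTokenizerFast
tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tok.encode("This is a simple input", max_length=6)  # fine while a pad token exists
tok.pad_token = None  # the "hotfix" the test applies
try:
    tok.encode("This is a simple input", max_length=6, padding="max_length")
except ValueError:
    print("padding without a pad_token raises ValueError, as the test expects")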
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : str=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : List[str]=True , ):
'''simple docstring'''
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase : str =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowercase : str =parent
lowercase : Dict =batch_size
lowercase : Union[str, Any] =num_channels
lowercase : int =min_resolution
lowercase : str =max_resolution
lowercase : List[str] =do_resize
lowercase : Dict =size
lowercase : Optional[Any] =do_normalize
lowercase : List[Any] =image_mean
lowercase : Optional[int] =image_std
lowercase : Optional[int] =do_rescale
lowercase : int =rescale_factor
lowercase : int =do_pad
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any]=False ):
'''simple docstring'''
if not batched:
lowercase : List[str] =image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image ):
lowercase , lowercase : str =image.size
else:
lowercase , lowercase : List[str] =image.shape[1], image.shape[2]
if w < h:
lowercase : Optional[Any] =int(self.size['''shortest_edge'''] * h / w )
lowercase : Dict =self.size['''shortest_edge''']
elif w > h:
lowercase : str =self.size['''shortest_edge''']
lowercase : Optional[int] =int(self.size['''shortest_edge'''] * w / h )
else:
lowercase : str =self.size['''shortest_edge''']
lowercase : Union[str, Any] =self.size['''shortest_edge''']
else:
lowercase : Union[str, Any] =[]
for image in image_inputs:
lowercase , lowercase : Any =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase : Optional[int] =max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
lowercase : Dict =max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Any =ConditionalDetrImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
lowercase : str =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# Initialize image_processing
lowercase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
lowercase : int =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[int] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase : Optional[int] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
lowercase : Any =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processing
lowercase : Dict =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowercase : List[str] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Dict =self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : Optional[Any] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[int] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
lowercase : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase : int =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : List[str] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : Any =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[int] =self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# prepare image and target
lowercase : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase : Tuple =json.loads(f.read() )
lowercase : List[str] ={'''image_id''': 39769, '''annotations''': target}
# encode them
lowercase : Optional[Any] =ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
lowercase : Dict =image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
lowercase : Union[str, Any] =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__ )
lowercase : str =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
lowercase : int =torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__ ) )
# verify boxes
lowercase : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__ )
lowercase : List[str] =torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowercase : Tuple =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__ ) )
# verify is_crowd
lowercase : Tuple =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__ ) )
# verify class_labels
lowercase : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__ ) )
# verify orig_size
lowercase : int =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__ ) )
# verify size
lowercase : List[str] =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# prepare image, target and masks_path
lowercase : List[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase : Dict =json.loads(f.read() )
lowercase : List[str] ={'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
lowercase : Any =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase : Union[str, Any] =ConditionalDetrImageProcessor(format='''coco_panoptic''' )
lowercase : Optional[Any] =image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''' )
# verify pixel values
lowercase : List[Any] =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__ )
lowercase : int =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
lowercase : int =torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__ ) )
# verify boxes
lowercase : str =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__ )
lowercase : List[Any] =torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowercase : Union[str, Any] =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__ ) )
# verify is_crowd
lowercase : Dict =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__ ) )
# verify class_labels
lowercase : Any =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__ ) )
# verify masks
lowercase : Optional[Any] =822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__ )
# verify orig_size
lowercase : Optional[Any] =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__ ) )
# verify size
lowercase : Optional[int] =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__ ) )
| 88 |
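The expected-size computation from get_expected_values above, extracted as a small function: the shorter image side is scaled to size["shortest_edge"] and the other side keeps the aspect ratio (the 1333 longest-edge cap is deliberately out of play in the test).
def expected_size(w: int, h: int, shortest_edge: int = 18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge   # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
print(expected_size(400, 300))  # (18, 24)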
'''simple docstring'''
import math
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
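A worked example for the two helpers above: with apparent power S (in VA) and power factor pf, real power is P = S * pf and reactive power is Q = S * sqrt(1 - pf^2).
import math
S, pf = 100.0, 0.8
print(S * pf)                    # real power: 80.0 W
print(S * math.sqrt(1 - pf**2))  # reactive power: ~60.0 VAR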