code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase__ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def _A ( A__ , A__ ):
"""simple docstring"""
inspect_dataset(A__ , A__ )
__lowercase = path + '''.py'''
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def _A ( A__ , A__ ):
"""simple docstring"""
inspect_metric(A__ , A__ )
__lowercase = path + '''.py'''
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = get_dataset_config_info(A__ , config_name=A__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
with pytest.raises(A__ ):
get_dataset_config_info(A__ , config_name=A__ )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = get_dataset_config_names(A__ )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = get_dataset_infos(A__ )
assert list(infos.keys() ) == expected_configs
__lowercase = expected_configs[0]
assert expected_config in infos
__lowercase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = get_dataset_infos(A__ )
assert expected_config in infos
__lowercase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
with pytest.raises(A__ ):
get_dataset_split_names(A__ , config_name=A__ )
| 624 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase__ = ['''gpt2''']
lowerCAmelCase__ = '''gpt2'''
if is_tf_available():
class lowercase_ (tf.Module ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : Tuple ):
super().__init__()
__lowercase = tokenizer
__lowercase = AutoConfig.from_pretrained(lowercase__ )
__lowercase = TFGPTaLMHeadModel.from_config(lowercase__ )
@tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='''text''' ),) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ):
__lowercase = self.tokenizer(lowercase__ )
__lowercase = tokenized['''input_ids'''].to_tensor()
__lowercase = tf.cast(input_ids_dense > 0 ,tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowercase = self.model(input_ids=lowercase__ ,attention_mask=lowercase__ )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setUp()
__lowercase = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowercase = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowercase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowercase = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowercase = tokenizer([test_inputs] ,return_tensors='''tf''' )
__lowercase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowercase = python_outputs[key].numpy()
__lowercase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase__ ,tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.function(lowercase__ )
for test_inputs in self.test_sentences:
__lowercase = tf.constant(lowercase__ )
__lowercase = compiled_tokenizer(lowercase__ )
__lowercase = tf_tokenizer(lowercase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = ModelToSave(tokenizer=lowercase__ )
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = model.serving(lowercase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowercase = Path(lowercase__ ) / '''saved.model'''
tf.saved_model.save(lowercase__ ,lowercase__ ,signatures={'''serving_default''': model.serving} )
__lowercase = tf.saved_model.load(lowercase__ )
__lowercase = loaded_model.signatures['''serving_default'''](lowercase__ )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ) # Build model with some sample inputs
__lowercase = tf_tokenizer.get_config()
__lowercase = TFGPTaTokenizer.from_config(lowercase__ )
__lowercase = model_from_config(lowercase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowercase = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ,max_length=lowercase__ )
__lowercase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 624 | 1 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if not isinstance(A__ , A__ ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_660_254])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = initial_vectors
for _ in range(A__ ):
__lowercase = iteration_step(A__ )
return vectors
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
__lowercase = vectors[i + 1]
new_vectors.append(A__ )
__lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = numpy.radians(A__ )
__lowercase , __lowercase = numpy.cos(A__ ), numpy.sin(A__ )
__lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(A__ , A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__lowercase , __lowercase = zip(*A__ )
plt.plot(A__ , A__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 624 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,*lowercase__ : Dict ,**lowercase__ : Union[str, Any] ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
| 624 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = BlenderbotConfig
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Dict = 'gelu'
def __init__( self : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Tuple=1_3 ,lowercase__ : Optional[int]=7 ,lowercase__ : List[str]=True ,lowercase__ : Tuple=False ,lowercase__ : Union[str, Any]=9_9 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=2 ,lowercase__ : Dict=4 ,lowercase__ : Any=3_7 ,lowercase__ : Tuple=0.1 ,lowercase__ : Tuple=0.1 ,lowercase__ : List[str]=2_0 ,lowercase__ : List[str]=2 ,lowercase__ : Optional[int]=1 ,lowercase__ : Any=0 ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
__lowercase = tf.concat([input_ids, eos_tensor] ,axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
__lowercase = prepare_blenderbot_inputs_dict(lowercase__ ,lowercase__ ,lowercase__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : str ):
__lowercase = TFBlenderbotModel(config=lowercase__ ).get_decoder()
__lowercase = inputs_dict['''input_ids''']
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict['''attention_mask'''][:1, :]
__lowercase = inputs_dict['''head_mask''']
__lowercase = 1
# first forward pass
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,head_mask=lowercase__ ,use_cache=lowercase__ )
__lowercase , __lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
__lowercase = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
__lowercase = tf.concat([input_ids, next_tokens] ,axis=-1 )
__lowercase = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
__lowercase = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
__lowercase = output_from_no_past[:, -3:, random_slice_idx]
__lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ ,lowercase__ ,rtol=1e-3 )
def _A ( A__ , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__=None , ):
"""simple docstring"""
if attention_mask is None:
__lowercase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__lowercase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE : List[str] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE : int = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : str = False
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = TFBlenderbotModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
@require_tokenizers
@require_tf
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ['My friends are cool but they eat too many carbs.']
SCREAMING_SNAKE_CASE : List[str] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.tokenizer(self.src_text ,return_tensors='''tf''' )
__lowercase = self.model.generate(
model_inputs.input_ids ,)
__lowercase = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=lowercase__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 624 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
lowerCAmelCase__ = (720, 1280) # Height, Width
lowerCAmelCase__ = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowerCAmelCase__ = 1 / 100
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = 250
def _A ( ):
"""simple docstring"""
__lowercase , __lowercase = get_dataset(A__ , A__ )
for index in range(A__ ):
__lowercase = random.sample(range(len(A__ ) ) , 4 )
__lowercase , __lowercase , __lowercase = update_image_and_anno(
A__ , A__ , A__ , A__ , A__ , filter_scale=A__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__lowercase = random_chars(32 )
__lowercase = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__lowercase = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(F"{file_root}.jpg" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
__lowercase = []
for anno in new_annos:
__lowercase = anno[3] - anno[1]
__lowercase = anno[4] - anno[2]
__lowercase = anno[1] + width / 2
__lowercase = anno[2] + height / 2
__lowercase = F"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(A__ )
with open(F"{file_root}.txt" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = []
for label_file in glob.glob(os.path.join(A__ , '''*.txt''' ) ):
__lowercase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(A__ ) as in_file:
__lowercase = in_file.readlines()
__lowercase = os.path.join(A__ , F"{label_name}.jpg" )
__lowercase = []
for obj_list in obj_lists:
__lowercase = obj_list.rstrip('''\n''' ).split(''' ''' )
__lowercase = float(obj[1] ) - float(obj[3] ) / 2
__lowercase = float(obj[2] ) - float(obj[4] ) / 2
__lowercase = float(obj[1] ) + float(obj[3] ) / 2
__lowercase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def _A ( A__ , A__ , A__ , A__ , A__ , A__ = 0.0 , ):
"""simple docstring"""
__lowercase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__lowercase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__lowercase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__lowercase = int(scale_x * output_size[1] )
__lowercase = int(scale_y * output_size[0] )
__lowercase = []
__lowercase = []
for i, index in enumerate(A__ ):
__lowercase = all_img_list[index]
path_list.append(A__ )
__lowercase = all_annos[index]
__lowercase = cva.imread(A__ )
if i == 0: # top-left
__lowercase = cva.resize(A__ , (divid_point_x, divid_point_y) )
__lowercase = img
for bbox in img_annos:
__lowercase = bbox[1] * scale_x
__lowercase = bbox[2] * scale_y
__lowercase = bbox[3] * scale_x
__lowercase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__lowercase = cva.resize(A__ , (output_size[1] - divid_point_x, divid_point_y) )
__lowercase = img
for bbox in img_annos:
__lowercase = scale_x + bbox[1] * (1 - scale_x)
__lowercase = bbox[2] * scale_y
__lowercase = scale_x + bbox[3] * (1 - scale_x)
__lowercase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__lowercase = cva.resize(A__ , (divid_point_x, output_size[0] - divid_point_y) )
__lowercase = img
for bbox in img_annos:
__lowercase = bbox[1] * scale_x
__lowercase = scale_y + bbox[2] * (1 - scale_y)
__lowercase = bbox[3] * scale_x
__lowercase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__lowercase = cva.resize(
A__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__lowercase = img
for bbox in img_annos:
__lowercase = scale_x + bbox[1] * (1 - scale_x)
__lowercase = scale_y + bbox[2] * (1 - scale_y)
__lowercase = scale_x + bbox[3] * (1 - scale_x)
__lowercase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
__lowercase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _A ( A__ ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__lowercase = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 624 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase__ = ['''gpt2''']
lowerCAmelCase__ = '''gpt2'''
if is_tf_available():
class lowercase_ (tf.Module ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : Tuple ):
super().__init__()
__lowercase = tokenizer
__lowercase = AutoConfig.from_pretrained(lowercase__ )
__lowercase = TFGPTaLMHeadModel.from_config(lowercase__ )
@tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='''text''' ),) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ):
__lowercase = self.tokenizer(lowercase__ )
__lowercase = tokenized['''input_ids'''].to_tensor()
__lowercase = tf.cast(input_ids_dense > 0 ,tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowercase = self.model(input_ids=lowercase__ ,attention_mask=lowercase__ )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setUp()
__lowercase = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowercase = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowercase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowercase = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowercase = tokenizer([test_inputs] ,return_tensors='''tf''' )
__lowercase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowercase = python_outputs[key].numpy()
__lowercase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase__ ,tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.function(lowercase__ )
for test_inputs in self.test_sentences:
__lowercase = tf.constant(lowercase__ )
__lowercase = compiled_tokenizer(lowercase__ )
__lowercase = tf_tokenizer(lowercase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = ModelToSave(tokenizer=lowercase__ )
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = model.serving(lowercase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowercase = Path(lowercase__ ) / '''saved.model'''
tf.saved_model.save(lowercase__ ,lowercase__ ,signatures={'''serving_default''': model.serving} )
__lowercase = tf.saved_model.load(lowercase__ )
__lowercase = loaded_model.signatures['''serving_default'''](lowercase__ )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ) # Build model with some sample inputs
__lowercase = tf_tokenizer.get_config()
__lowercase = TFGPTaTokenizer.from_config(lowercase__ )
__lowercase = model_from_config(lowercase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowercase = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ,max_length=lowercase__ )
__lowercase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 624 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
lowerCAmelCase__ = {
'''google/rembert''': 256,
}
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-backed tokenizer (RemBERT-style).

    Loads a ``sentencepiece.model`` vocabulary and exposes the standard
    ``PreTrainedTokenizer`` hooks: tokenization, piece <-> id conversion,
    special-token handling with [CLS]/[SEP], and vocabulary saving.

    NOTE(review): the many ``__lowercase = ...`` assignments below look like
    obfuscated/renamed assignment targets (e.g. ``self.do_lower_case = ...``,
    ``self.sp_model = ...``, ``vocab = ...``); the intended names must be
    restored for this class to run — TODO confirm against the original source.
    """

    SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : str ,lowercase__ : Optional[Any] ,lowercase__ : List[str]=False ,lowercase__ : Dict=True ,lowercase__ : List[str]=True ,lowercase__ : Dict="[CLS]" ,lowercase__ : Union[str, Any]="[SEP]" ,lowercase__ : List[str]="[UNK]" ,lowercase__ : int="[SEP]" ,lowercase__ : List[str]="[PAD]" ,lowercase__ : Optional[int]="[CLS]" ,lowercase__ : List[Any]="[MASK]" ,**lowercase__ : int ,):
        # Forward every special token / normalisation flag to the base class.
        super().__init__(
            do_lower_case=lowercase__ ,remove_space=lowercase__ ,keep_accents=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,pad_token=lowercase__ ,cls_token=lowercase__ ,mask_token=lowercase__ ,**lowercase__ ,)
        __lowercase = do_lower_case
        __lowercase = remove_space
        __lowercase = keep_accents
        __lowercase = vocab_file
        # Load the SentencePiece model from the given vocabulary file.
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(lowercase__ )

    @property
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Vocabulary size == number of pieces in the SentencePiece model.
        return len(self.sp_model )

    def SCREAMING_SNAKE_CASE ( self : str ):
        # Build a token -> id map, including tokens added after training.
        __lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : List[Any] ):
        # The SentencePiece processor is not picklable; it is dropped here
        # and reloaded from ``vocab_file`` in ``__setstate__``.
        __lowercase = self.__dict__.copy()
        __lowercase = None
        return state

    def __setstate__( self : str ,lowercase__ : Optional[int] ):
        __lowercase = d
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ,lowercase__ : List[Any]=False ):
        # Tokenize raw text into SentencePiece pieces.
        __lowercase = self.sp_model.EncodeAsPieces(lowercase__ )
        return pieces

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ):
        # token (piece) -> integer id
        return self.sp_model.PieceToId(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
        # integer id -> token (piece)
        return self.sp_model.IdToPiece(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
        # Join a list of pieces back into a plain string.
        __lowercase = self.sp_model.decode_pieces(lowercase__ )
        return out_string

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Single sequence:   [CLS] X [SEP]
        # Pair of sequences: [CLS] A [SEP] B [SEP]
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
        # Return a mask with 1 for special tokens and 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
        return [1] + ([0] * len(lowercase__ )) + [1]

    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Token-type ids: 0 for the first sequence (incl. its special tokens),
        # 1 for the second sequence.
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
        # Copy the SentencePiece model file into the save directory,
        # optionally prefixing the file name.
        if not os.path.isdir(lowercase__ ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(lowercase__ ) )
            return
        __lowercase = os.path.join(
            lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file ,lowercase__ )
        return (out_vocab_file,)
| 624 | 1 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowercase_ (lowerCamelCase__ ):
    """Output container returned by the scheduler's ``step`` method.

    NOTE(review): both fields carry the obfuscated name
    ``SCREAMING_SNAKE_CASE``; originally these were ``prev_sample`` (the
    sample for the next timestep) and ``pred_original_sample`` (the model's
    current estimate of x_0) — TODO restore.
    """

    SCREAMING_SNAKE_CASE : torch.FloatTensor
    SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None
def _A ( A__ , A__=0.9_9_9 , A__="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
__lowercase = []
for i in range(A__ ):
__lowercase = i / num_diffusion_timesteps
__lowercase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ ):
    """Inverse-DDIM noise scheduler.

    Walks the diffusion process *forward* (clean sample -> noise), the
    inverse of DDIM sampling, following formula (12) of
    https://arxiv.org/pdf/2010.02502.pdf.

    NOTE(review): the ``__lowercase = ...`` assignments are obfuscated
    targets (``self.betas``, ``self.alphas``, ``prev_timestep`` ...);
    they must be restored for the class to run. ``torch.floataa`` and
    ``np.intaa`` are also obfuscations of ``float32``/``int64`` —
    TODO confirm against the original diffusers source.
    """

    # Solver order as exposed to pipelines.
    SCREAMING_SNAKE_CASE : str = 1

    @register_to_config
    def __init__( self : Union[str, Any] ,lowercase__ : int = 1_0_0_0 ,lowercase__ : float = 0.0_0_0_1 ,lowercase__ : float = 0.0_2 ,lowercase__ : str = "linear" ,lowercase__ : Optional[Union[np.ndarray, List[float]]] = None ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : int = 0 ,lowercase__ : str = "epsilon" ,lowercase__ : float = 1.0 ,**lowercase__ : Optional[int] ,):
        # ``set_alpha_to_one`` was renamed ``set_alpha_to_zero``; accept the
        # old kwarg but emit a deprecation message.
        if kwargs.get('''set_alpha_to_one''' ,lowercase__ ) is not None:
            __lowercase = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' ,'''1.0.0''' ,lowercase__ ,standard_warn=lowercase__ )
            __lowercase = kwargs['''set_alpha_to_one''']
        # Build the beta schedule (per-timestep noise variance).
        if trained_betas is not None:
            __lowercase = torch.tensor(lowercase__ ,dtype=torch.floataa )
        elif beta_schedule == "linear":
            __lowercase = torch.linspace(lowercase__ ,lowercase__ ,lowercase__ ,dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            __lowercase = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowercase__ ,dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            __lowercase = betas_for_alpha_bar(lowercase__ )
        else:
            raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" )
        __lowercase = 1.0 - self.betas
        __lowercase = torch.cumprod(self.alphas ,dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        __lowercase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        __lowercase = 1.0
        # setable values
        __lowercase = None
        __lowercase = torch.from_numpy(np.arange(0 ,lowercase__ ).copy().astype(np.intaa ) )

    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : torch.FloatTensor ,lowercase__ : Optional[int] = None ):
        # Inverse DDIM needs no input scaling; return the sample unchanged.
        return sample

    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : Union[str, torch.device] = None ):
        # Select ``num_inference_steps`` evenly spaced timesteps out of the
        # training schedule and store them on the requested device.
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                F" maximal {self.config.num_train_timesteps} timesteps." )
        __lowercase = num_inference_steps
        __lowercase = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        __lowercase = (np.arange(0 ,lowercase__ ) * step_ratio).round().copy().astype(np.intaa )
        __lowercase = torch.from_numpy(lowercase__ ).to(lowercase__ )
        self.timesteps += self.config.steps_offset

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : torch.FloatTensor ,lowercase__ : int ,lowercase__ : torch.FloatTensor ,lowercase__ : float = 0.0 ,lowercase__ : bool = False ,lowercase__ : Optional[torch.FloatTensor] = None ,lowercase__ : bool = True ,):
        # One inverse-DDIM update: sample at t -> sample at t+1.
        # 1. get previous step value (=t+1)
        __lowercase = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        __lowercase = self.alphas_cumprod[timestep]
        __lowercase = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        __lowercase = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            __lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            __lowercase = model_output
        elif self.config.prediction_type == "sample":
            __lowercase = model_output
            __lowercase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            __lowercase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            __lowercase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                ''' `v_prediction`''' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            __lowercase = pred_original_sample.clamp(
                -self.config.clip_sample_range ,self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        __lowercase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        __lowercase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=lowercase__ ,pred_original_sample=lowercase__ )

    def __len__( self : List[Any] ):
        # Length == number of training timesteps in the configured schedule.
        return self.config.num_train_timesteps
| 624 |
'''simple docstring'''
def _A ( A__ = 1000000 ):
"""simple docstring"""
__lowercase = set(range(3 , A__ , 2 ) )
primes.add(2 )
for p in range(3 , A__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , A__ , A__ ) ) )
__lowercase = [float(A__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A__ , limit + 1 , A__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
    """Deprecated drop-in replacement for ``Trainer``.

    Kept only for backward compatibility: it warns on construction and
    otherwise delegates everything to the parent class.
    """

    def __init__( self : Any ,lowercase__ : Any=None ,**lowercase__ : Optional[int] ):
        # NOTE(review): the second argument to ``warnings.warn`` should be a
        # warning category (presumably FutureWarning); here the obfuscated
        # ``args`` parameter is passed instead — TODO restore.
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''' ,lowercase__ ,)
        super().__init__(args=lowercase__ ,**lowercase__ )
| 624 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase_ (lowerCamelCase__ ):
    """Shared test harness exercising a ``PretrainedConfig`` subclass.

    Runs a battery of checks: common properties exist and are settable,
    JSON string/file round-trips, ``save_pretrained``/``from_pretrained``
    round-trips (optionally through a subfolder), ``num_labels`` handling,
    default construction, and that every ``config_common_kwargs`` override
    is honoured by ``__init__``.

    NOTE(review): assignments below target the obfuscated name
    ``__lowercase``; they presumably set ``self.parent``,
    ``self.config_class`` etc. — TODO restore the original names.
    """

    def __init__( self : Optional[Any] ,lowercase__ : int ,lowercase__ : List[str]=None ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Dict ):
        __lowercase = parent
        __lowercase = config_class
        __lowercase = has_text_modality
        __lowercase = kwargs
        __lowercase = common_properties

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Check getters, setters and __init__ kwargs for the common fields.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = (
            ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(lowercase__ ,lowercase__ ) ,msg=F"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(lowercase__ ):
            try:
                setattr(lowercase__ ,lowercase__ ,lowercase__ )
                self.parent.assertEqual(
                    getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(lowercase__ ):
            try:
                __lowercase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def SCREAMING_SNAKE_CASE ( self : str ):
        # JSON-string round-trip: every input kwarg must survive serialisation.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] ,lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : int ):
        # JSON-file round-trip via to_json_file / from_json_file.
        __lowercase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase = os.path.join(lowercase__ ,'''config.json''' )
            config_first.to_json_file(lowercase__ )
            __lowercase = self.config_class.from_json_file(lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # save_pretrained / from_pretrained round-trip.
        __lowercase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(lowercase__ )
            __lowercase = self.config_class.from_pretrained(lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Same round-trip, but through a subfolder of the save directory.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = '''test'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase = os.path.join(lowercase__ ,lowercase__ )
            config_first.save_pretrained(lowercase__ )
            __lowercase = self.config_class.from_pretrained(lowercase__ ,subfolder=lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # ``num_labels`` should resize the id/label maps, also on reset.
        __lowercase = self.config_class(**self.inputs_dict ,num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) ,5 )
        self.parent.assertEqual(len(config.labelaid ) ,5 )
        __lowercase = 3
        self.parent.assertEqual(len(config.idalabel ) ,3 )
        self.parent.assertEqual(len(config.labelaid ) ,3 )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # Composite configs cannot be built without sub-configs; skip them.
        if self.config_class.is_composition:
            return
        __lowercase = self.config_class()
        self.parent.assertIsNotNone(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Every kwarg in ``config_common_kwargs`` must be applied by __init__.
        __lowercase = copy.deepcopy(lowercase__ )
        __lowercase = self.config_class(**lowercase__ )
        __lowercase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
            elif getattr(lowercase__ ,lowercase__ ) != value:
                wrong_values.append((key, getattr(lowercase__ ,lowercase__ ), value) )
        if len(lowercase__ ) > 0:
            __lowercase = '''\n'''.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Convenience runner invoking every check above.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 624 | 1 |
'''simple docstring'''
class lowercase_ :
    """Directed flow network described by an adjacency (capacity) matrix.

    Multiple sources/sinks are reduced to a single super-source /
    super-sink by adding fake vertices connected with "infinite"
    (sum-of-all-outgoing) capacity edges.

    NOTE(review): ``__lowercase = ...`` assignments are obfuscated
    attribute names (``self.source_index``, ``self.sink_index``,
    ``self.verticesCount`` ...) — TODO restore before use.
    """

    def __init__( self : Union[str, Any] ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Any ):
        __lowercase = None
        __lowercase = None
        __lowercase = graph
        self._normalize_graph(lowercase__ ,lowercase__ )
        __lowercase = len(lowercase__ )
        __lowercase = None

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ):
        # Normalize to exactly one source and one sink.
        # NOTE(review): ``sources is int`` compares against the *type object*
        # and is always False for an int value — presumably
        # ``isinstance(sources, int)`` was intended; TODO confirm.
        if sources is int:
            __lowercase = [sources]
        if sinks is int:
            __lowercase = [sinks]
        if len(lowercase__ ) == 0 or len(lowercase__ ) == 0:
            return
        __lowercase = sources[0]
        __lowercase = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(lowercase__ ) > 1 or len(lowercase__ ) > 1:
            __lowercase = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            __lowercase = len(self.graph ) + 1
            # Prepend a super-source row/column feeding every real source.
            for room in self.graph:
                room.insert(0 ,0 )
            self.graph.insert(0 ,[0] * size )
            for i in sources:
                __lowercase = max_input_flow
            __lowercase = 0
            __lowercase = len(self.graph ) + 1
            # Append a super-sink row/column drained by every real sink.
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                __lowercase = max_input_flow
            __lowercase = size - 1

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Run the configured algorithm once and return the max-flow value.
        if self.maximum_flow_algorithm is None:
            raise Exception('''You need to set maximum flow algorithm before.''' )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Tuple ):
        # ``algorithm`` is an executor class; instantiate it bound to self.
        __lowercase = algorithm(self )
class lowercase_ :
    """Base class for maximum-flow algorithm executors.

    Caches references to the network's vertex count, source, sink and
    graph, and makes ``execute`` idempotent; concrete algorithms override
    ``_algorithm``.
    """

    def __init__( self : Union[str, Any] ,lowercase__ : Optional[Any] ):
        __lowercase = flow_network
        __lowercase = flow_network.verticesCount
        __lowercase = flow_network.sourceIndex
        __lowercase = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        __lowercase = flow_network.graph
        __lowercase = False

    def SCREAMING_SNAKE_CASE ( self : str ):
        # Run the algorithm once; subsequent calls are no-ops.
        if not self.executed:
            self._algorithm()
            __lowercase = True

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # Hook overridden by concrete algorithms.
        pass
class lowercase_ (lowerCamelCase__ ):
    """Executor specialisation that exposes the computed maximum flow."""

    def __init__( self : List[str] ,lowercase__ : Optional[Any] ):
        super().__init__(lowercase__ )
        # use this to save your result
        __lowercase = -1

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Guard: the result is only valid after ``execute`` has run.
        if not self.executed:
            raise Exception('''You should execute algorithm before using its result!''' )
        return self.maximum_flow
class lowercase_ (lowerCamelCase__ ):
    """Push–relabel maximum-flow algorithm (relabel-to-front rule).

    NOTE(review): ``__lowercase = ...`` assignments are obfuscated targets
    (``self.preflow``, ``self.heights``, ``vertices_list`` ...) — TODO
    restore before running.
    """

    def __init__( self : List[str] ,lowercase__ : Optional[int] ):
        super().__init__(lowercase__ )
        # preflow[u][v]: flow currently pushed along edge (u, v);
        # heights / excesses: per-vertex labels used by push-relabel.
        __lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
        __lowercase = [0] * self.verticies_count
        __lowercase = [0] * self.verticies_count

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Initialise the source at maximum height.
        __lowercase = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        __lowercase = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        __lowercase = 0
        while i < len(lowercase__ ):
            __lowercase = vertices_list[i]
            __lowercase = self.heights[vertex_index]
            self.process_vertex(lowercase__ )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 ,vertices_list.pop(lowercase__ ) )
                __lowercase = 0
            else:
                i += 1
        # Maximum flow == total flow leaving the source.
        __lowercase = sum(self.preflow[self.source_index] )

    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ):
        # Discharge: push excess to admissible neighbours; relabel when stuck.
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(lowercase__ ,lowercase__ )
            self.relabel(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[str] ,lowercase__ : Any ):
        # Push min(excess, residual capacity) along (from_index, to_index).
        __lowercase = min(
            self.excesses[from_index] ,self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,)
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ):
        # Relabel: raise height to 1 + min height over residual neighbours.
        __lowercase = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                __lowercase = self.heights[to_index]
        if min_height is not None:
            __lowercase = min_height + 1
if __name__ == "__main__":
    # Demo: maximum flow on a 4-vertex network, source vertex 0, sink 3.
    # NOTE(review): the names read below (``graph``, ``entrances``, ``exits``,
    # ``flow_network``, ``maximum_flow``) are assigned to the obfuscated
    # module name ``lowerCAmelCase__`` above — TODO restore original names.
    lowerCAmelCase__ = [0]
    lowerCAmelCase__ = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    lowerCAmelCase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    lowerCAmelCase__ = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    lowerCAmelCase__ = flow_network.find_maximum_flow()
    print(f'maximum flow is {maximum_flow}')
| 624 |
'''simple docstring'''
import re
def _A ( A__ ):
"""simple docstring"""
__lowercase = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A__ , A__ ) )
if __name__ == "__main__":
lowerCAmelCase__ = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 624 | 1 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
__lowercase = 4
__lowercase = (1 << p) - 1
for _ in range(p - 2 ):
__lowercase = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 624 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowercase_ :
    """Dense row-major matrix with arithmetic and Sherman–Morrison update.

    Supports indexing with (row, col) tuples, ``+``, unary ``-``, ``-``,
    scalar and matrix multiplication, transpose, and the Sherman–Morrison
    formula for rank-1 updates of an inverse.

    NOTE(review): assignments to ``__lowercase`` are obfuscated names
    (``self.row``/``self.column``, ``self.array``, ``result`` ...) — TODO
    restore before use.
    """

    def __init__( self : Any ,lowercase__ : int ,lowercase__ : int ,lowercase__ : float = 0 ):
        __lowercase , __lowercase = row, column
        __lowercase = [[default_value for c in range(lowercase__ )] for r in range(lowercase__ )]

    def __str__( self : List[str] ):
        # Render with right-aligned cells padded to the widest element.
        __lowercase = F"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        __lowercase = 0
        for row_vector in self.array:
            for obj in row_vector:
                __lowercase = max(lowercase__ ,len(str(lowercase__ ) ) )
        __lowercase = F"%{max_element_length}s"

        # Make string and return
        def single_line(lowercase__ : list[float] ) -> str:
            nonlocal string_format_identifier
            __lowercase = '''['''
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line

        s += "\n".join(single_line(lowercase__ ) for row_vector in self.array )
        return s

    def __repr__( self : List[str] ):
        return str(self )

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : tuple[int, int] ):
        # True iff ``loc`` is a 2-tuple/list of in-range (row, col) indices.
        if not (isinstance(lowercase__ ,(list, tuple) ) and len(lowercase__ ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self : Tuple ,lowercase__ : tuple[int, int] ):
        assert self.validate_indicies(lowercase__ )
        return self.array[loc[0]][loc[1]]

    def __setitem__( self : Tuple ,lowercase__ : tuple[int, int] ,lowercase__ : float ):
        assert self.validate_indicies(lowercase__ )
        __lowercase = value

    def __add__( self : List[Any] ,lowercase__ : Matrix ):
        # Element-wise addition; shapes must match.
        assert isinstance(lowercase__ ,lowercase__ )
        assert self.row == another.row and self.column == another.column

        # Add
        __lowercase = Matrix(self.row ,self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                __lowercase = self[r, c] + another[r, c]
        return result

    def __neg__( self : List[str] ):
        # Element-wise negation.
        __lowercase = Matrix(self.row ,self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                __lowercase = -self[r, c]
        return result

    def __sub__( self : str ,lowercase__ : Matrix ):
        return self + (-another)

    def __mul__( self : Dict ,lowercase__ : int | float | Matrix ):
        if isinstance(lowercase__ ,(int, float) ): # Scalar multiplication
            __lowercase = Matrix(self.row ,self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    __lowercase = self[r, c] * another
            return result
        elif isinstance(lowercase__ ,lowercase__ ): # Matrix multiplication
            assert self.column == another.row
            __lowercase = Matrix(self.row ,another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            __lowercase = F"Unsupported type given for another ({type(lowercase__ )})"
            raise TypeError(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Transpose: result[c, r] = self[r, c].
        __lowercase = Matrix(self.column ,self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                __lowercase = self[r, c]
        return result

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Matrix ,lowercase__ : Matrix ):
        # Sherman–Morrison: compute (A + u v^T)^-1 from A^-1 (self), u, v.
        # Returns None when the update is singular (1 + v^T A^-1 u == 0).
        assert isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ )
        assert self.row == self.column == u.row == v.row # u, v should be column vector
        assert u.column == v.column == 1 # u, v should be column vector

        # Calculate
        __lowercase = v.transpose()
        __lowercase = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def _A ( ):
        """Smoke test: build a 3x3 identity-like matrix, two column vectors,
        and exercise the Sherman–Morrison update.

        NOTE(review): the names read below (``ainv``, ``u``, ``v`` and the
        arguments ``A__``) are assigned to obfuscated placeholders and are
        undefined at runtime — TODO restore the original names; both test
        functions were also obfuscated to the same name ``_A``, so the
        first is shadowed by the second.
        """
        __lowercase = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            __lowercase = 1
        print(F"a^(-1) is {ainv}" )
        # u, v
        __lowercase = Matrix(3 , 1 , 0 )
        __lowercase , __lowercase , __lowercase = 1, 2, -3
        __lowercase = Matrix(3 , 1 , 0 )
        __lowercase , __lowercase , __lowercase = 4, -2, 5
        print(F"u is {u}" )
        print(F"v is {v}" )
        print(F"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(A__ , A__ )}" )

    def _A ( ):
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    testa()
| 624 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Conflict-style markers wrapped around converted lines needing human review.
lowerCAmelCase__ = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase__ = '''=======
>>>>>>>
'''
# NOTE(review): the same module-level name is rebound for every constant in
# this file; the originals (HIGHLIGHT_MESSAGE_PRE, HIGHLIGHT_MESSAGE_POST,
# TO_HIGHLIGHT, TO_CONVERT) need restoring for the command class to work.
# Identifiers whose presence flags a line for manual review.
lowerCAmelCase__ = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# Ordered (regex pattern, replacement) rewrites applied to every line.
lowerCAmelCase__ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def _A ( args ):
    """Factory for the ``convert`` CLI sub-command.

    Args:
        args: parsed argparse namespace with ``tfds_path`` and
            ``datasets_directory`` attributes.

    Returns:
        A configured ``ConvertCommand``.
    """
    # Fix: the original parameter was never read (the body referenced an
    # undefined global ``args``); bind the parsed namespace explicitly.
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase_ (lowerCamelCase__ ):
    """CLI command converting TensorFlow Datasets scripts to HF Datasets.

    Walks the given TFDS path (file or directory), rewrites each dataset
    script line-by-line using the ``TO_CONVERT`` regex table, flags lines
    mentioning ``TO_HIGHLIGHT`` identifiers for manual review, and copies
    shared utility modules next to the builders that import them.

    NOTE(review): assignments to ``__lowercase`` are obfuscated targets;
    the names actually read afterwards (``abs_tfds_path``, ``out_line``,
    ``output_file`` ...) must be restored for the command to run.
    """

    @staticmethod
    def SCREAMING_SNAKE_CASE ( lowercase__ : ArgumentParser ):
        # Register the ``convert`` sub-command and its two CLI arguments.
        __lowercase = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=lowercase__ ,required=lowercase__ ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=lowercase__ ,required=lowercase__ ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=lowercase__ )

    def __init__( self : Optional[Any] ,lowercase__ : str ,lowercase__ : str ,*lowercase__ : Optional[Any] ):
        __lowercase = get_logger('''datasets-cli/converting''' )
        __lowercase = tfds_path
        __lowercase = datasets_directory

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Resolve the input path (directory of scripts or a single script).
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            __lowercase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        __lowercase = os.path.abspath(self._datasets_directory )
        self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        __lowercase = []
        __lowercase = []
        __lowercase = {}
        if os.path.isdir(self._tfds_path ):
            __lowercase = os.listdir(lowercase__ )
        else:
            __lowercase = [os.path.basename(self._tfds_path )]
        # Convert each candidate script, skipping package/test files.
        for f_name in file_names:
            self._logger.info(F"Looking at file {f_name}" )
            __lowercase = os.path.join(lowercase__ ,lowercase__ )
            __lowercase = os.path.join(lowercase__ ,lowercase__ )
            if not os.path.isfile(lowercase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(lowercase__ ,encoding='''utf-8''' ) as f:
                __lowercase = f.readlines()
            __lowercase = []
            __lowercase = False
            __lowercase = False
            __lowercase = []
            for line in lines:
                __lowercase = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    __lowercase = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    __lowercase = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    __lowercase = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    __lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Surround the suspicious line with conflict-style markers.
                    __lowercase = True
                    __lowercase = list(filter(lambda lowercase__ : e in out_line ,lowercase__ ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase__ ) + '''\n''' )
                    out_lines.append(lowercase__ )
                    out_lines.append(lowercase__ )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        __lowercase = re.sub(lowercase__ ,lowercase__ ,lowercase__ )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    __lowercase = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,lowercase__ )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    __lowercase = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    __lowercase = True
                out_lines.append(lowercase__ )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                __lowercase = f_name.replace('''.py''' ,'''''' )
                __lowercase = os.path.join(lowercase__ ,lowercase__ )
                __lowercase = os.path.join(lowercase__ ,lowercase__ )
                os.makedirs(lowercase__ ,exist_ok=lowercase__ )
                self._logger.info(F"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(lowercase__ )
            if needs_manual_update:
                with_manual_update.append(lowercase__ )
            with open(lowercase__ ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(lowercase__ )
            self._logger.info(F"Converted in {output_file}" )
        # Copy shared utility modules next to the builders importing them.
        for utils_file in utils_files:
            try:
                __lowercase = os.path.basename(lowercase__ )
                __lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(F"Moving {dest_folder} to {utils_file}" )
                shutil.copy(lowercase__ ,lowercase__ )
            except KeyError:
                self._logger.error(F"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 624 |
'''simple docstring'''
def _A ( A__ = 50 ):
"""simple docstring"""
__lowercase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
    # The solver above was machine-renamed to `_A`; the original call to
    # `solution()` raised NameError.
    print(f'{_A() = }')
| 624 | 1 |
'''simple docstring'''
import string
import numpy
def _A ( A__ , A__ ):
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowercase_ :
    """Hill cipher over the 36-symbol alphabet A-Z + 0-9.

    NOTE(review): this block is machine-obfuscated.  Every class attribute is
    assigned to the same name ``SCREAMING_SNAKE_CASE`` (later assignments shadow
    earlier ones), and the bodies read identifiers that are never bound as
    written (``x``, ``lowerCamelCase__``, ``encrypt_key``, ``self.key_string``,
    ``self.modulus``, ``self.to_int``, ``self.replace_letters``,
    ``self.replace_digits``, ``self.process_text``, ``self.break_key``,
    ``greatest_common_divisor``, ``chars``, ``text``, ``batch``, ``vec``, ...).
    The comments below describe the *intended* behaviour; restore the original
    identifiers before relying on this class."""
    # Intended: key_string -- the 36-character cipher alphabet.
    SCREAMING_SNAKE_CASE : int = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    # NOTE(review): the lambda's parameter was renamed but its body still reads ``x``.
    SCREAMING_SNAKE_CASE : Dict = numpy.vectorize(lambda lowerCamelCase__ : x % 3_6 )
    # Intended: to_int = numpy.vectorize(round); ``lowerCamelCase__`` is unresolved here.
    SCREAMING_SNAKE_CASE : Optional[Any] = numpy.vectorize(lowerCamelCase__ )
    def __init__( self : Any ,lowercase__ : numpy.ndarray ):
        # Intended: reduce the key mod 36, validate its determinant, and record
        # its order (break_key = side length of the square key matrix).
        __lowercase = self.modulus(lowercase__ ) # mod36 calc's on the encrypt key
        self.check_determinant() # validate the determinant of the encryption key
        __lowercase = encrypt_key.shape[0]
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ):
        # Intended: replace_letters(letter) -> index of the letter in key_string.
        return self.key_string.index(lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
        # Intended: replace_digits(num) -> key_string character for a rounded index.
        return self.key_string[round(lowercase__ )]
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Intended: check_determinant -- det(key) must be coprime with 36 for the
        # cipher to be invertible; raises ValueError otherwise.
        __lowercase = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            __lowercase = det % len(self.key_string )
        __lowercase = len(self.key_string )
        if greatest_common_divisor(lowercase__ ,len(self.key_string ) ) != 1:
            __lowercase = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ):
        # Intended: process_text -- keep only in-alphabet characters and pad the
        # text with its last character up to a multiple of break_key.
        __lowercase = [char for char in text.upper() if char in self.key_string]
        __lowercase = chars[-1]
        while len(lowercase__ ) % self.break_key != 0:
            chars.append(lowercase__ )
        return "".join(lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
        # Intended: encrypt -- multiply each break_key-sized batch of symbol
        # indices by the key matrix mod 36 and map indices back to characters.
        __lowercase = self.process_text(text.upper() )
        __lowercase = ''''''
        for i in range(0 ,len(lowercase__ ) - self.break_key + 1 ,self.break_key ):
            __lowercase = text[i : i + self.break_key]
            __lowercase = [self.replace_letters(lowercase__ ) for char in batch]
            __lowercase = numpy.array([vec] ).T
            __lowercase = self.modulus(self.encrypt_key.dot(lowercase__ ) ).T.tolist()[
                0
            ]
            __lowercase = ''''''.join(
                self.replace_digits(lowercase__ ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Intended: make_decrypt_key -- modular inverse of the key matrix mod 36,
        # built as det_inv * det * inv(key) and rounded back to integers.
        __lowercase = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            __lowercase = det % len(self.key_string )
        __lowercase = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                __lowercase = i
                break
        __lowercase = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(lowercase__ ) )
    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : str ):
        # Intended: decrypt -- same batch walk as encrypt, using the decrypt key.
        __lowercase = self.make_decrypt_key()
        __lowercase = self.process_text(text.upper() )
        __lowercase = ''''''
        for i in range(0 ,len(lowercase__ ) - self.break_key + 1 ,self.break_key ):
            __lowercase = text[i : i + self.break_key]
            __lowercase = [self.replace_letters(lowercase__ ) for char in batch]
            __lowercase = numpy.array([vec] ).T
            __lowercase = self.modulus(decrypt_key.dot(lowercase__ ) ).T.tolist()[0]
            __lowercase = ''''''.join(
                self.replace_digits(lowercase__ ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def _A ( ):
    """Interactive Hill-cipher driver: read an N x N integer key from stdin,
    then encrypt or decrypt user-supplied text.

    Restores the local names the obfuscated original dropped (every assignment
    went to ``__lowercase`` while later lines read ``hill_matrix``/``hc``/
    ``option``, and the list comprehension read the undefined ``A__``).
    """
    n = int(input('''Enter the order of the encryption key: ''' ) )
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    # ``lowercase_`` is the (machine-renamed) HillCipher class defined above.
    hc = lowercase_(numpy.array(hill_matrix) )
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_to_encrypt = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_to_encrypt) )
    elif option == "2":
        text_to_decrypt = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_to_decrypt) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # The interactive driver above was machine-renamed to `_A`; the original
    # call to `main()` raised NameError.
    _A()
| 624 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
# Root logger at INFO so conversion progress is visible when run as a script.
logging.basicConfig(level=logging.INFO)
lowerCAmelCase__ = logging.getLogger(__name__)
# Sample sentence (with non-ASCII characters) for quick manual checks.
lowerCAmelCase__ = '''Hello world! cécé herlolip'''
# Lightweight stand-in for the authors' argparse namespace: the BertAbs
# hyper-parameters expected by both the original and the converted model.
# NOTE(review): all three module constants here are machine-renamed to the
# same identifier ``lowerCAmelCase__`` -- each assignment shadows the previous
# one, so later code reading ``logger``/``SAMPLE_TEXT``/``BertAbsConfig`` by
# their original names will not resolve.
lowerCAmelCase__ = namedtuple(
    '''BertAbsConfig''',
    [
        '''temp_dir''',
        '''large''',
        '''use_bert_emb''',
        '''finetune_bert''',
        '''encoder''',
        '''share_emb''',
        '''max_pos''',
        '''enc_layers''',
        '''enc_hidden_size''',
        '''enc_heads''',
        '''enc_ff_size''',
        '''enc_dropout''',
        '''dec_layers''',
        '''dec_hidden_size''',
        '''dec_heads''',
        '''dec_ff_size''',
        '''dec_dropout''',
    ],
)
def _A ( A__ , A__ ):
    """Convert an original BertAbs checkpoint into the local ``BertAbsSummarizer``,
    verify both stacks produce matching outputs, then save the state dict.

    NOTE(review): machine-obfuscated -- the two parameters share the name ``A__``
    (a SyntaxError as written) and every local binding goes to ``__lowercase``
    while later lines read ``original``/``new_model``/``tokenizer``/
    ``encoder_input_ids``/... .  Comments describe the intended flow; restore
    the original identifiers before running."""
    # Hyper-parameters of the released bertabs-finetuned-cnndm checkpoint.
    __lowercase = BertAbsConfig(
        temp_dir='''.''' , finetune_bert=A__ , large=A__ , share_emb=A__ , use_bert_emb=A__ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    # Load the authors' checkpoint; torch.load's 2nd positional arg is
    # map_location (the lambda pins storages to CPU, but reads the unresolved
    # name ``storage`` as written).
    __lowercase = torch.load(A__ , lambda A__ , A__ : storage )
    __lowercase = AbsSummarizer(A__ , torch.device('''cpu''' ) , A__ )
    original.eval()
    __lowercase = BertAbsSummarizer(A__ , torch.device('''cpu''' ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''' )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''' )
    __lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    # prepare the model inputs
    __lowercase = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
    __lowercase = torch.tensor(A__ ).unsqueeze(0 )
    __lowercase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
    __lowercase = torch.tensor(A__ ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    __lowercase = encoder_input_ids
    __lowercase = decoder_input_ids
    __lowercase = __lowercase = None
    __lowercase = None
    __lowercase = __lowercase = None
    __lowercase = __lowercase = None
    __lowercase = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    __lowercase = original(A__ , A__ , A__ , A__ , A__ , A__ , A__ )[0]
    __lowercase = original.generator(A__ )
    __lowercase = new_model(
        A__ , A__ , A__ , A__ , A__ )[0]
    __lowercase = new_model.generator(A__ )
    # Report the max absolute deviation between the two stacks and between the
    # two generator outputs; abort if they are not close within 1e-3.
    __lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(A__ ) )
    __lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(A__ ) )
    __lowercase = torch.allclose(A__ , A__ , atol=1e-3 )
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''' )
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''' )
    torch.save(
        new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 624 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Declarative map of submodule -> public names consumed by _LazyModule below.
# Restores the ``_import_structure`` binding the obfuscated original dropped
# (the dict and the modeling list were assigned to throwaway names while the
# _LazyModule call read the undefined ``_import_structure``), and fixes the
# TYPE_CHECKING import of the router class to match the declared name.
_import_structure = {
    '''configuration_nllb_moe''': [
        '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''NllbMoeConfig''',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects
    pass
else:
    _import_structure['''modeling_nllb_moe'''] = [
        '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NllbMoeForConditionalGeneration''',
        '''NllbMoeModel''',
        '''NllbMoePreTrainedModel''',
        '''NllbMoeTop2Router''',
        '''NllbMoeSparseMLP''',
    ]
if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,  # was "NllbMoeTopaRouter": no such name is declared above
        )
else:
    import sys
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Vision extras absent: provide an inert stand-in so the type references
    # below still resolve at import time without PIL installed.
    class lowercase_ :
        """No-op placeholder used in place of ``PIL.Image`` when vision deps are missing."""
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *lowercase__ : Union[str, Any] ,**lowercase__ : Tuple ):
            # Accept any call signature and do nothing.
            pass
def _A ( A__ ):
"""simple docstring"""
__lowercase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase_ (unittest.TestCase ):
    """Pipeline tests for ``depth-estimation`` (DepthEstimationPipeline).

    NOTE(review): machine-obfuscated -- every method shares the name
    ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones) and bodies read
    names that are never bound (``depth_estimator``, ``dataset``, ``outputs``,
    ``hashimage``).  Comments describe the intended behaviour."""
    SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : int ):
        # Intended: get_test_pipeline -- build the pipeline under test plus two
        # sample image paths for the common pipeline test runner.
        __lowercase = DepthEstimationPipeline(model=lowercase__ ,image_processor=lowercase__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ):
        # Intended: run_pipeline_test -- a single image, then a batch mixing a
        # PIL image, a URL and RGBA/LA/L fixture files; every output must have
        # a tensor `predicted_depth` and a PIL `depth`.
        __lowercase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,lowercase__ )
        import datasets
        __lowercase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        __lowercase = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,lowercase__ ,)
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass
    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Intended: integration test against Intel/dpt-large -- pin min/max of
        # the predicted depth map (the depth-image hash proved flaky, see below).
        __lowercase = '''Intel/dpt-large'''
        __lowercase = pipeline('''depth-estimation''' ,model=lowercase__ )
        __lowercase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        __lowercase = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.6_6_2 )
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 624 | 1 |
'''simple docstring'''
from math import isqrt
def _A ( A__ ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(A__ ) + 1 ) )
def _A ( A__ = 10**6 ):
"""simple docstring"""
__lowercase = 0
__lowercase = 1
__lowercase = 7
while prime_candidate < max_prime:
primes_count += is_prime(A__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
    # The solver above was machine-renamed to `_A`; the original call to
    # `solution()` raised NameError.
    print(f'{_A() = }')
| 624 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(A__ ):
__lowercase = y[k] + step_size * ode_func(A__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 624 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
# Global logging: timestamped format, level from $LOGLEVEL (default INFO), to stdout.
logging.basicConfig(
    format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
    datefmt='''%Y-%m-%d %H:%M:%S''',
    level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
    stream=sys.stdout,
)
lowerCAmelCase__ = logging.getLogger(__name__)
# Registries of supported checkpoint name -> model / tokenizer class.
# NOTE(review): all three module constants here are machine-renamed to the same
# identifier ``lowerCAmelCase__`` (each assignment shadows the previous one);
# later code reads ``logger``/``model_dict``/``tokenizer_dict``, which no
# longer exist under those names.
lowerCAmelCase__ = {'''facebook/bart-base''': BartForConditionalGeneration}
lowerCAmelCase__ = {'''facebook/bart-base''': BartTokenizer}
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=A__ , default=A__ , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=A__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=A__ , default=A__ , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=A__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=A__ , )
parser.add_argument(
'''--config_name''' , type=A__ , default=A__ , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=A__ , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=A__ , default=A__ , help='''Where to store the final ONNX file.''' )
__lowercase = parser.parse_args()
return args
def _A ( A__ , A__="cpu" ):
    """Load a (model, tokenizer) pair by checkpoint name and move the model to a device.

    NOTE(review): machine-obfuscated -- both parameters share the name ``A__``
    (a SyntaxError as written), ``model_dict``/``tokenizer_dict`` were renamed
    away at module level, and the body reads ``model_name``/``huggingface_model``/
    ``tokenizer`` which are never bound.  Restore the original identifiers
    before running."""
    __lowercase = model_dict[model_name].from_pretrained(A__ ).to(A__ )
    __lowercase = tokenizer_dict[model_name].from_pretrained(A__ )
    if model_name in ["facebook/bart-base"]:
        # BART export: neutralise sampling/forced-token settings for ONNX tracing.
        __lowercase = 0
        __lowercase = None
        __lowercase = 0
    return huggingface_model, tokenizer
def _A ( A__ , A__ , A__ , A__ , A__ ):
    """Export the scripted beam-search generator to ONNX, shrink it, and check
    ONNX Runtime output matches ``model.generate``.

    NOTE(review): machine-obfuscated -- the five parameters all share the name
    ``A__`` (a SyntaxError as written) and the body reads ``inputs``/
    ``num_beams``/``max_length``/``summary_ids``/``ort_sess``/``ort_out`` which
    are never bound (assignments go to ``__lowercase``).  Comments describe the
    intended flow."""
    model.eval()
    __lowercase = None
    # TorchScript wrapper that embeds beam search so it can be traced/exported.
    __lowercase = torch.jit.script(BARTBeamSearchGenerator(A__ ) )
    with torch.no_grad():
        __lowercase = '''My friends are cool but they eat too many carbs.'''
        __lowercase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device )
        # Reference summary ids from the eager model, compared against ORT below.
        __lowercase = model.generate(
            inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=A__ , max_length=A__ , early_stopping=A__ , decoder_start_token_id=model.config.decoder_start_token_id , )
    torch.onnx.export(
        A__ , (
            inputs['''input_ids'''],
            inputs['''attention_mask'''],
            num_beams,
            max_length,
            model.config.decoder_start_token_id,
        ) , A__ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
        '''input_ids''': {0: '''batch''', 1: '''seq'''},
        '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
    } , example_outputs=A__ , )
    logger.info('''Model exported to {}'''.format(A__ ) )
    # Strip duplicated initializers to reduce the ONNX file size.
    __lowercase = remove_dup_initializers(os.path.abspath(A__ ) )
    logger.info('''Deduplicated and optimized model written to {}'''.format(A__ ) )
    __lowercase = onnxruntime.InferenceSession(A__ )
    __lowercase = ort_sess.run(
        A__ , {
        '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
        '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
        '''num_beams''': np.array(A__ ),
        '''max_length''': np.array(A__ ),
        '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
    } , )
    # Torch vs ONNX Runtime outputs must agree within 1e-3.
    np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
    logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
    logger.info('''Success.''' )
def _A ( ):
    """Script entry point: parse args, load the model, and run the ONNX export.

    NOTE(review): machine-obfuscated -- ``parse_args``/``load_model_tokenizer``/
    ``export_and_validate_model`` are the helpers above, which were all renamed
    to `_A` (each definition shadows the previous), and the locals ``args``/
    ``model``/``device`` are never bound (assignments go to ``__lowercase``).
    Restore the original identifiers before running."""
    __lowercase = parse_args()
    # Fallback export settings when not supplied on the command line.
    __lowercase = 5
    __lowercase = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    __lowercase = torch.device(args.device )
    __lowercase , __lowercase = load_model_tokenizer(args.model_name_or_path , A__ )
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
    model.to(A__ )
    if args.max_length:
        __lowercase = args.max_length
    if args.num_beams:
        __lowercase = args.num_beams
    if args.output_file_path:
        __lowercase = args.output_file_path
    else:
        __lowercase = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(A__ , A__ , A__ , A__ , A__ )
if __name__ == "__main__":
    # The entry point above was machine-renamed to `_A`; the original call to
    # `main()` raised NameError.
    _A()
| 624 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
__lowercase = sum(A__ ) / len(A__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(A__ )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 624 | 1 |
'''simple docstring'''
import math
def _A ( A__ , A__ ):
"""simple docstring"""
if (
not isinstance(A__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def _A ( A__ , A__ ):
"""simple docstring"""
if (
not isinstance(A__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 624 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCAmelCase__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCAmelCase__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCAmelCase__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
    """`datasets` metric wrapping ``scipy.stats.spearmanr``.

    NOTE(review): machine-obfuscated -- both methods share the name
    ``SCREAMING_SNAKE_CASE`` and the compute body below binds its result to
    ``__lowercase`` while reading ``results``/``return_pvalue`` (and the three
    duplicated ``lowercase__`` parameters are a SyntaxError as written).
    Restore the original identifiers before use."""
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Intended: _info -- metric metadata: two float columns
        # (predictions, references) plus a reference URL.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
            {
                '''predictions''': datasets.Value('''float''' ),
                '''references''': datasets.Value('''float''' ),
            } ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Union[str, Any]=False ):
        # Intended: _compute -- Spearman rank correlation; optionally include p-value.
        __lowercase = spearmanr(lowercase__ ,lowercase__ )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 624 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Declarative map of submodule -> public names consumed by _LazyModule below.
# Restores the ``_import_structure`` binding the obfuscated original dropped:
# the dict and both optional lists were assigned to throwaway names while the
# _LazyModule call read the undefined ``_import_structure``.
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # vision extras missing: expose only the configuration objects
    pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip the modeling objects
    pass
else:
    _import_structure['''modeling_yolos'''] = [
        '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''YolosForObjectDetection''',
        '''YolosModel''',
        '''YolosPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 |
'''simple docstring'''
import random
from typing import Any
def _A ( A__ ):
"""simple docstring"""
for _ in range(len(A__ ) ):
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase , __lowercase = data[b], data[a]
return data
if __name__ == "__main__":
    # Demo run.  The obfuscated original bound both sample lists to the same
    # throwaway name and called the nonexistent `fisher_yates_shuffle`
    # (now `_A` above); names restored.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', _A(integers), _A(strings))
| 624 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowercase_ :
    """Test helper that builds tiny MaskFormerSwin configs, inputs and checks.

    NOTE(review): machine-obfuscated -- the ``__init__`` signature repeats the
    parameter name ``lowercase__`` (a SyntaxError as written), every attribute
    assignment binds ``__lowercase`` while reading unbound names (``parent``,
    ``batch_size``, ...), and later methods read ``config``/``pixel_values``/
    ``labels``/``model``/``result`` which are never bound.  Comments describe
    the intended behaviour of each method."""
    def __init__( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str]=1_3 ,lowercase__ : Optional[int]=3_2 ,lowercase__ : Optional[Any]=2 ,lowercase__ : Union[str, Any]=3 ,lowercase__ : Any=1_6 ,lowercase__ : List[str]=[1, 2, 1] ,lowercase__ : List[str]=[2, 2, 4] ,lowercase__ : Optional[Any]=2 ,lowercase__ : Union[str, Any]=2.0 ,lowercase__ : Optional[int]=True ,lowercase__ : Tuple=0.0 ,lowercase__ : int=0.0 ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : List[Any]="gelu" ,lowercase__ : Optional[Any]=False ,lowercase__ : List[Any]=True ,lowercase__ : Dict=0.0_2 ,lowercase__ : Union[str, Any]=1e-5 ,lowercase__ : List[str]=True ,lowercase__ : Optional[int]=None ,lowercase__ : str=True ,lowercase__ : Optional[int]=1_0 ,lowercase__ : Dict=8 ,lowercase__ : Optional[int]=["stage1", "stage2", "stage3"] ,lowercase__ : Dict=[1, 2, 3] ,):
        # Intended: store every hyper-parameter on self for use by get_config().
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = patch_size
        __lowercase = num_channels
        __lowercase = embed_dim
        __lowercase = depths
        __lowercase = num_heads
        __lowercase = window_size
        __lowercase = mlp_ratio
        __lowercase = qkv_bias
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = drop_path_rate
        __lowercase = hidden_act
        __lowercase = use_absolute_embeddings
        __lowercase = patch_norm
        __lowercase = layer_norm_eps
        __lowercase = initializer_range
        __lowercase = is_training
        __lowercase = scope
        __lowercase = use_labels
        __lowercase = type_sequence_label_size
        __lowercase = encoder_stride
        __lowercase = out_features
        __lowercase = out_indices
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Intended: prepare_config_and_inputs -- random pixel_values plus
        # optional labels and a freshly built config.
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        __lowercase = self.get_config()
        return config, pixel_values, labels
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Intended: get_config -- MaskFormerSwinConfig from the stored dims.
        # NOTE(review): ``path_norm=`` below looks like a typo for ``patch_norm=``
        # -- confirm against MaskFormerSwinConfig's accepted kwargs.
        return MaskFormerSwinConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ):
        # Intended: create_and_check_model -- forward pass and output-shape check.
        __lowercase = MaskFormerSwinModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        __lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        __lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : Union[str, Any] ,lowercase__ : Union[str, Any] ):
        # Intended: create_and_check_backbone -- feature-map/channel checks and
        # the out_features validation error for an unknown stage name.
        __lowercase = MaskFormerSwinBackbone(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[1_3, 1_6, 1_6, 1_6] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,[1_6, 3_2, 6_4] )
        # verify ValueError
        with self.parent.assertRaises(lowercase__ ):
            __lowercase = ['''stem''']
            __lowercase = MaskFormerSwinBackbone(config=lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Intended: prepare_config_and_inputs_for_common -- config + inputs dict.
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common-test suite for MaskFormerSwin (model and backbone variants).

    NOTE(review): this block is machine-obfuscated — every method is named
    ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones), locals are
    assigned to ``__lowercase`` while the original names are read back (leaving
    many names undefined, e.g. ``config``, ``inputs_dict``, ``arg_names``), and
    the two base classes are both ``lowerCamelCase__``.  Code is left
    byte-identical here; verify against the upstream MaskFormerSwin test module
    before relying on any of it.
    """
    # presumably all_model_classes upstream — TODO confirm
    SCREAMING_SNAKE_CASE : Tuple = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    # presumably pipeline_model_mapping upstream — TODO confirm
    SCREAMING_SNAKE_CASE : Dict = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    # The five boolean class attributes below all share one name, so only the
    # last assignment survives; upstream these are distinct test_* toggles.
    SCREAMING_SNAKE_CASE : List[Any] = False
    SCREAMING_SNAKE_CASE : Any = False
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Dict = False
    # setUp: build the shared model tester and config tester.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        __lowercase = MaskFormerSwinModelTester(self )
        # NOTE(review): `lowercase__` is undefined here; upstream this is
        # `config_class=MaskFormerSwinConfig` — confirm.
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,embed_dim=3_7 )
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
            ''' `nn.DataParallel`'''
        ) )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass
    # test_config: run the standard ConfigTester battery.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # create_and_test_config_common_properties: intentionally a no-op.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        return
    # test_model: forward-pass shape check via the model tester.
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
    # test_backbone: backbone feature-map checks via the model tester.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowercase__ )
    @unittest.skip('''Swin does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        pass
    @unittest.skip('''Swin does not support feedforward chunking''' )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        pass
    # test_model_common_attributes: input embeddings exist, output embeddings
    # are absent or an nn.Linear.
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            __lowercase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase__ ,nn.Linear ) )
    # test_forward_signature: first positional argument must be `pixel_values`.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowercase__ )
    @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        pass
    @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        pass
    # check_hidden_states_output helper: run a model and verify the number and
    # spatial shape of the returned hidden states.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : str ,lowercase__ : str ):
        __lowercase = model_class(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        with torch.no_grad():
            __lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
        __lowercase = outputs.hidden_states
        __lowercase = getattr(
            self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowercase__ ) ,lowercase__ )
        # Swin has a different seq_length
        __lowercase = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
    # test_hidden_states_output: exercise the helper with the default image size.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __lowercase = True
            self.check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            self.check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
    # test_hidden_states_output_with_padding: same check with an image size
    # padded up to a multiple of the patch size.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = 3
        __lowercase = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __lowercase = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # round the spatial dims up to the next patch multiple
        __lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __lowercase = True
            self.check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            self.check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ ,(padded_height, padded_width) )
    @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        pass
    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def SCREAMING_SNAKE_CASE ( self : int ):
        pass
    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        pass
    # test_output_equivalence (tuple vs dict): model outputs must match
    # elementwise whether return_dict is True or False.
    def SCREAMING_SNAKE_CASE ( self : str ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        # Zero out NaNs in-place so allclose can compare tensors containing them.
        def set_nan_tensor_to_zero(lowercase__ : Union[str, Any] ):
            __lowercase = 0
            return t
        # NOTE(review): mutable default `{}` below — safe only because it is
        # never mutated here.
        def check_equivalence(lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any]={} ):
            with torch.no_grad():
                __lowercase = model(**lowercase__ ,return_dict=lowercase__ ,**lowercase__ )
                __lowercase = model(**lowercase__ ,return_dict=lowercase__ ,**lowercase__ ).to_tuple()
            # Walk both output structures in parallel and compare leaf tensors.
            def recursive_check(lowercase__ : Optional[int] ,lowercase__ : Optional[Any] ):
                if isinstance(lowercase__ ,(List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(lowercase__ ,lowercase__ ):
                        recursive_check(lowercase__ ,lowercase__ )
                elif isinstance(lowercase__ ,lowercase__ ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() ,dict_object.values() ):
                        recursive_check(lowercase__ ,lowercase__ )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(lowercase__ ) ,set_nan_tensor_to_zero(lowercase__ ) ,atol=1e-5 ) ,msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                            F" {torch.isnan(lowercase__ ).any()} and `inf`: {torch.isinf(lowercase__ )}. Dict has"
                            F" `nan`: {torch.isnan(lowercase__ ).any()} and `inf`: {torch.isinf(lowercase__ )}."
                        ) ,)
            recursive_check(lowercase__ ,lowercase__ )
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            # plain inputs
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
            check_equivalence(lowercase__ ,lowercase__ ,lowercase__ )
            # with labels
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            check_equivalence(lowercase__ ,lowercase__ ,lowercase__ )
            # with hidden states requested
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
            check_equivalence(lowercase__ ,lowercase__ ,lowercase__ ,{'''output_hidden_states''': True} )
            # with labels and hidden states
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            check_equivalence(lowercase__ ,lowercase__ ,lowercase__ ,{'''output_hidden_states''': True} )
@require_torch
class lowercase_ (unittest.TestCase , lowerCamelCase__ ):
    """Backbone-specific checks for MaskFormerSwinBackbone.

    NOTE(review): restored from obfuscated code.  The two class attributes were
    both renamed to ``SCREAMING_SNAKE_CASE`` (the second shadowed the first);
    the names below are the ones the test body itself reads
    (``self.all_model_classes``) plus the conventional ``config_class``.
    """

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def SCREAMING_SNAKE_CASE ( self : int ):
        """setUp: build the shared model tester."""
        self.model_tester = MaskFormerSwinModelTester(self )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Run every backbone class and sanity-check feature maps, hidden states and attentions."""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['''pixel_values'''].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps ,tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ):
                self.assertTrue(feature_map.shape[:2] ,(batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict ,output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) ,len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) ,(batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict ,output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 624 |
'''Rename an old diffusers UNet repo dump (config keys and/or state-dict keys) to the current naming scheme.'''
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

# NOTE(review): restored from obfuscated code — the class names were mangled to
# ``UNetaD*``; these are the real diffusers classes.
from diffusers import UNet2DConditionModel, UNet2DModel


# Which conversion steps to run (mutually independent toggles).
do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--repo_path''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    # old config key -> new config key
    config_parameters_to_change = {
        '''image_size''': '''sample_size''',
        '''num_res_blocks''': '''layers_per_block''',
        '''block_channels''': '''block_out_channels''',
        '''down_blocks''': '''down_block_types''',
        '''up_blocks''': '''up_block_types''',
        '''downscale_freq_shift''': '''freq_shift''',
        '''resnet_num_groups''': '''norm_num_groups''',
        '''resnet_act_fn''': '''act_fn''',
        '''resnet_eps''': '''norm_eps''',
        '''num_head_channels''': '''attention_head_dim''',
    }

    # old state-dict key prefix -> new module name
    key_parameters_to_change = {
        '''time_steps''': '''time_proj''',
        '''mid''': '''mid_block''',
        '''downsample_blocks''': '''down_blocks''',
        '''upsample_blocks''': '''up_blocks''',
    }

    # Single-model repos keep config.json at the root; pipelines nest it under "unet".
    subfolder = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''

    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        # Drop the legacy keys so the model constructor falls back to its defaults.
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, '''config.json'''):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        # Move each legacy key's value onto its new name.
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config['''down_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
    config['''up_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # skip legacy downsample conv params that no longer exist
            if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                # remap only the top-level module name, at most once per param
                if not has_changed and param_key.split('''.''')[0] == key:
                    new_state_dict['''.'''.join([new_key] + param_key.split('''.''')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 624 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): constant names restored from obfuscated code — they are exactly
# the names the tokenizer class below reads (VOCAB_FILES_NAMES, etc.).
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''Salesforce/codegen-350M-mono''': (
            '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
        ),
    },
}

# max input length per checkpoint
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''Salesforce/codegen-350M-mono''': 2048,
}
class lowercase_ (lowerCamelCase__ ):
    """Fast (Rust/tokenizers-backed) tokenizer for CodeGen.

    Adds ``truncate_before_pattern`` support to :meth:`decode` so a generated
    completion can be cut at natural stopping points (second ``print``, second
    ``def``, or any caller-supplied regex).

    NOTE(review): restored from obfuscated code — every method had been renamed
    to ``SCREAMING_SNAKE_CASE``.  The names below are taken from the calls the
    block itself makes (``super().decode``, ``super()._batch_encode_plus``,
    ``super()._encode_plus``, ``self.truncate``); ``save_vocabulary`` is the
    conventional override name — confirm against upstream
    ``tokenization_codegen_fast``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CodeGenTokenizer

    def __init__( self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,unk_token="<|endoftext|>" ,bos_token="<|endoftext|>" ,eos_token="<|endoftext|>" ,add_prefix_space=False ,**kwargs ,):
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,unk_token=unk_token ,bos_token=bos_token ,eos_token=eos_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        # Adding a BOS token is not supported by the fast backend.
        if kwargs.pop('''add_bos_token''' ,False ):
            model_id = kwargs.pop('''name_or_path''' ,'''''' )
            raise ValueError(
                '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        # Make sure the backend pre-tokenizer honours `add_prefix_space`.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(lowercase__ ,pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus( self ,*args ,**kwargs ):
        """Reject pretokenized input unless add_prefix_space was enabled."""
        is_split_into_words = kwargs.get('''is_split_into_words''' ,False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args ,**kwargs )

    def _encode_plus( self ,*args ,**kwargs ):
        """Reject pretokenized input unless add_prefix_space was enabled."""
        is_split_into_words = kwargs.get('''is_split_into_words''' ,False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args ,**kwargs )

    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )

    def decode( self ,token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] ,skip_special_tokens: bool = False ,clean_up_tokenization_spaces: bool = None ,truncate_before_pattern: Optional[List[str]] = None ,**kwargs ,):
        """Decode ids to text, optionally truncating at the first match of any pattern."""
        decoded_text = super().decode(
            token_ids=token_ids ,skip_special_tokens=skip_special_tokens ,clean_up_tokenization_spaces=clean_up_tokenization_spaces ,**kwargs ,)
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text ,truncate_before_pattern )
        return decoded_text

    def truncate( self ,completion ,truncate_before_pattern ):
        """Cut `completion` before a second print/def statement or the earliest terminal-pattern match."""
        def find_re(string ,pattern ,start_pos ):
            # first match position at/after start_pos, or -1
            m = pattern.search(string ,start_pos )
            return m.start() if m else -1

        terminals = [re.compile(pattern ,re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer('''^print''' ,completion ,re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('''^def''' ,completion ,re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion ,terminal ,start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
| 624 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ (lowerCamelCase__ ):
    """Config tester: a built CvtConfig must expose its common properties."""

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Instantiate the config and assert the expected attributes exist."""
        # NOTE(review): restored from obfuscated code — the config was assigned
        # to ``__lowercase`` but the hasattr checks read an undefined name.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,'''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config ,'''num_heads''' ) )
class lowercase_ :
    """Builds tiny Cvt configs/inputs and checks model and classification-head outputs.

    NOTE(review): restored from obfuscated code — parameter names are matched to
    the attribute assignments by their default values, and method names are the
    ones the common-test class calls (``prepare_config_and_inputs``,
    ``create_and_check_model``, ``create_and_check_for_image_classification``,
    ``prepare_config_and_inputs_for_common``).
    """

    # NOTE: the list defaults are treated as read-only; they are never mutated.
    def __init__( self ,parent ,batch_size=1_3 ,image_size=6_4 ,num_channels=3 ,embed_dim=[1_6, 4_8, 9_6] ,num_heads=[1, 3, 6] ,depth=[1, 2, 1_0] ,patch_sizes=[7, 3, 3] ,patch_stride=[4, 2, 2] ,patch_padding=[2, 1, 1] ,stride_kv=[2, 2, 2] ,cls_token=[False, False, True] ,attention_drop_rate=[0.0, 0.0, 0.0] ,initializer_range=0.0_2 ,layer_norm_eps=1e-1_2 ,is_training=True ,use_labels=True ,num_labels=2 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs( self ):
        """Return (config, pixel_values, labels); labels only when use_labels is set."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        """Build a CvtConfig from the tester's hyper-parameters."""
        return CvtConfig(
            image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)

    def create_and_check_model( self ,config ,pixel_values ,labels ):
        """Run CvtModel and check the last hidden state's spatial shape."""
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        # apply each stage's conv-embedding arithmetic to the spatial dims
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )

    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        """Run CvtForImageClassification and check the logits shape."""
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common-test suite for Cvt (CvtModel + CvtForImageClassification).

    NOTE(review): machine-obfuscated block — methods are all named
    ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones) and locals are
    assigned to ``__lowercase`` while the original names are read back, leaving
    e.g. ``arg_names`` and ``inputs_dict`` undefined.  Code kept byte-identical;
    verify against the upstream Cvt test module.
    """
    # presumably all_model_classes upstream — TODO confirm
    SCREAMING_SNAKE_CASE : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    # presumably pipeline_model_mapping upstream — TODO confirm
    SCREAMING_SNAKE_CASE : Optional[int] = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    # The five booleans below share one name; only the last assignment survives.
    SCREAMING_SNAKE_CASE : Optional[int] = False
    SCREAMING_SNAKE_CASE : Union[str, Any] = False
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : Optional[Any] = False
    SCREAMING_SNAKE_CASE : str = False
    # setUp: build the shared model tester and config tester.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = CvtModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
    # test_config: run the standard ConfigTester battery.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # create_and_test_config_common_properties: intentionally a no-op.
    def SCREAMING_SNAKE_CASE ( self : str ):
        return
    @unittest.skip(reason='''Cvt does not output attentions''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE ( self : str ):
        pass
    # test_forward_signature: first positional argument must be `pixel_values`.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowercase__ )
    # test_model: forward-pass shape check via the model tester.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
    # test_hidden_states_output: number of hidden states must equal the number
    # of stages, and the first hidden state must have the stage-0 shape.
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        def check_hidden_states_output(lowercase__ : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ):
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            with torch.no_grad():
                __lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
            __lowercase = outputs.hidden_states
            __lowercase = len(self.model_tester.depth )
            self.assertEqual(len(lowercase__ ) ,lowercase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,[
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] ,)
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
    # test_for_image_classification: classification-head logits shape check.
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase__ )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    # test_model_from_pretrained: every published checkpoint must load.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = CvtModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def prepare_img( ):
    """Load the standard COCO cats fixture image used by the integration test.

    NOTE(review): restored from obfuscated code — the function was renamed to
    ``_A`` and returned the undefined name ``image``; the integration test calls
    ``prepare_img()``, which grounds the restored name.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
    """Integration test: run the first pretrained Cvt checkpoint on a real image.

    NOTE(review): restored from obfuscated code — the cached property name is
    grounded by the body's ``self.default_image_processor`` read; the test
    method name follows the conventional upstream ``test_inference_*`` naming
    (unittest only discovers ``test_``-prefixed methods) — confirm upstream.
    """

    @cached_property
    def default_image_processor( self ):
        """Image processor matching the first pretrained Cvt checkpoint."""
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def test_inference_image_classification_head( self ):
        """Check the logits shape and first three logit values on the COCO fixture image."""
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
| 624 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub whose ``forward`` takes the tokenizer outputs in their natural
    (contiguous) order — used by the ``ensure_valid_input`` tests below, which
    ground both the class name and the parameter names."""

    def forward( self ,input_ids ,token_type_ids ,attention_mask ):
        return None
class FuncNonContiguousArgs:
    """Stub whose ``forward`` interleaves an extra argument between the
    tokenizer outputs (non-contiguous order) — used by the
    ``ensure_valid_input`` tests; the class name is grounded by its call site.
    NOTE(review): the second parameter's name follows the upstream stub — confirm.
    """

    def forward( self ,input_ids ,some_other_args ,token_type_ids ,attention_mask ):
        return None
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase__ ,'''tf''' ,1_2 ,**lowercase__ )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase__ ,'''pt''' ,1_2 ,**lowercase__ )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
from transformers import BertModel
__lowercase = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(lowercase__ ) )
vocab_file.flush()
__lowercase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__lowercase = BertModel(BertConfig(vocab_size=len(lowercase__ ) ) )
model.save_pretrained(lowercase__ )
self._test_export(lowercase__ ,'''pt''' ,1_2 ,lowercase__ )
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__lowercase = self._test_export(lowercase__ ,'''tf''' ,1_2 ,**lowercase__ )
__lowercase = quantize(Path(lowercase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__lowercase = self._test_export(lowercase__ ,'''pt''' ,1_2 ,**lowercase__ )
__lowercase = quantize(lowercase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : List[str]=None ,**lowercase__ : Union[str, Any] ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
__lowercase = Path(lowercase__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ )
return path
except Exception as e:
self.fail(lowercase__ )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
from transformers import BertModel
__lowercase = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
__lowercase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowercase__ ,lowercase__ ,'''pt''' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
from transformers import TFBertModel
__lowercase = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
__lowercase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowercase__ ,lowercase__ ,'''tf''' )
    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : Optional[int] ):
        # Shared helper: run `infer_shapes` on a feature-extraction pipeline and
        # verify the variable names, their ordering, and the dynamic axes.
        # NOTE(review): locals were name-mangled; `shapes` / `variable_names`
        # presumably come from the `infer_shapes(...)` unpacking — confirm in VCS.
        __lowercase = FeatureExtractionPipeline(lowercase__ ,lowercase__ )
        __lowercase = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        __lowercase , __lowercase , __lowercase , __lowercase = infer_shapes(lowercase__ ,lowercase__ )
        # Assert all variables are present
        self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] ,lowercase__ )
        self.assertSequenceEqual(variable_names[3:] ,lowercase__ )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] ,{0: '''batch''', 1: '''sequence'''} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['''output_0'''] ,{0: '''batch''', 1: '''sequence'''} )
        self.assertDictEqual(shapes['''output_1'''] ,{0: '''batch'''} )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Verify `ensure_valid_input` reorders/filters tokenizer outputs so they
        # match the forward-signature order, for both contiguous and
        # non-contiguous argument layouts.
        __lowercase = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        __lowercase = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        __lowercase , __lowercase = ensure_valid_input(FuncContiguousArgs() ,lowercase__ ,lowercase__ )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(lowercase__ ) ,3 )
        # Should have exactly the same input names
        self.assertEqual(set(lowercase__ ) ,set(lowercase__ ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(lowercase__ ,(tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        __lowercase , __lowercase = ensure_valid_input(FuncNonContiguousArgs() ,lowercase__ ,lowercase__ )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(lowercase__ ) ,1 )
        self.assertEqual(len(lowercase__ ) ,1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] ,tokens['''input_ids'''] )
        self.assertEqual(ordered_input_names[0] ,'''input_ids''' )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # `generate_identified_filename` must insert the suffix before the extension.
        # NOTE(review): `generated` refers to the name-mangled `__lowercase` result.
        __lowercase = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) ,'''-test''' )
        self.assertEqual('''/home/something/my_fake_model-test.onnx''' ,generated.as_posix() )
| 624 |
'''simple docstring'''
def _A ( ):
"""simple docstring"""
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def _A ( A__ ):
"""simple docstring"""
__lowercase = 1
__lowercase = 2
while i * i <= n:
__lowercase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _A ( ):
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(A__ ) > 500 )
if __name__ == "__main__":
print(solution())
| 624 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowerCAmelCase__ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
lowerCAmelCase__ = '''sshleifer/student_marian_en_ro_6_1'''
lowerCAmelCase__ = '''sshleifer/tiny-mbart'''
@require_torch
class lowercase_ (lowerCamelCase__ ):
    """End-to-end tests for the ``run_translation.py`` seq2seq example.

    Covers single/multi-GPU runs, sharded DDP (fairscale), apex fp16,
    log-level propagation, full training convergence, and bitsandbytes
    optimizer memory savings.

    NOTE(review): identifiers in this file were machine-mangled — every local
    is assigned to ``__lowercase`` and most parameters are ``lowercase__`` —
    so reads such as ``logs``, ``eval_metrics`` or ``experiments`` reference
    names that no longer exist.  Consult VCS history before relying on
    runtime behaviour.
    """
    # Quick smoke run of the trainer; checks eval metrics exist and are finite.
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int=False ,lowercase__ : Optional[Any]=None ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Optional[Any]=True ,lowercase__ : str=True ,):
        __lowercase = self.run_trainer(
            eval_steps=1 ,max_len=1_2 ,model_name=lowercase__ ,num_train_epochs=1 ,distributed=lowercase__ ,extra_args_str=lowercase__ ,predict_with_generate=lowercase__ ,do_train=lowercase__ ,do_eval=lowercase__ ,do_predict=lowercase__ ,)
        __lowercase = TrainerState.load_from_json(os.path.join(lowercase__ ,'''trainer_state.json''' ) ).log_history
        if not do_eval:
            return
        __lowercase = [log for log in logs if '''eval_loss''' in log.keys()]
        __lowercase = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            __lowercase = eval_metrics[-1]
            assert isinstance(last_step_stats['''eval_bleu'''] ,lowercase__ )
            assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def SCREAMING_SNAKE_CASE ( self : str ):
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        self.run_seqaseq_quick(distributed=lowercase__ )
    @require_torch_multi_gpu
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        self.run_seqaseq_quick(distributed=lowercase__ )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        self.run_seqaseq_quick(distributed=lowercase__ ,extra_args_str='''--sharded_ddp simple''' )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def SCREAMING_SNAKE_CASE ( self : int ):
        self.run_seqaseq_quick(distributed=lowercase__ ,extra_args_str='''--sharded_ddp simple --fp16''' )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        self.run_seqaseq_quick(distributed=lowercase__ ,extra_args_str='''--sharded_ddp zero_dp_2''' ,predict_with_generate=lowercase__ )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        self.run_seqaseq_quick(
            distributed=lowercase__ ,extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' ,predict_with_generate=lowercase__ )
    @require_apex
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=lowercase__ ,extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=lowercase__ ,extra_args_str='''--fp16 --fp16_backend=apex''' )
    # Verify how often the "Running training" info line is logged for each
    # combination of --log_level / --log_level_replica.
    @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
    @require_torch_multi_gpu
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Dict ):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        __lowercase = {
            # test with the default log_level - should be info and thus log info once
            '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
        }
        __lowercase = experiments[experiment_id]
        __lowercase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        __lowercase = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**lowercase__ ,extra_args_str=data['''extra_args_str'''] )
        __lowercase = len(re.findall(lowercase__ ,cl.err ) )
        self.assertEqual(lowercase__ ,data['''n_matches'''] )
    # Full (slow) training run: loss must decrease and predict artifacts must be saved.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        __lowercase = self.run_trainer(
            eval_steps=2 ,max_len=1_2_8 ,model_name=lowercase__ ,learning_rate=3e-4 ,num_train_epochs=1_0 ,distributed=lowercase__ ,)
        # Check metrics
        __lowercase = TrainerState.load_from_json(os.path.join(lowercase__ ,'''trainer_state.json''' ) ).log_history
        __lowercase = [log for log in logs if '''eval_loss''' in log.keys()]
        __lowercase = eval_metrics[0]
        __lowercase = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] ,lowercase__ )
        # test if do_predict saves generations and metrics
        __lowercase = os.listdir(lowercase__ )
        __lowercase = {os.path.basename(lowercase__ ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    # Compare GPU memory usage of AdamW vs 8-bit BNB AdamW; loss must match.
    @slow
    @require_bitsandbytes
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(lowercase__ : str ) -> Tuple[int, float]:
            __lowercase = '''--skip_memory_metrics 0'''
            __lowercase = self.run_trainer(
                max_len=1_2_8 ,model_name=lowercase__ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=lowercase__ ,distributed=lowercase__ ,extra_args_str=lowercase__ ,do_eval=lowercase__ ,do_predict=lowercase__ ,n_gpus_to_use=1 ,)
            # Check metrics
            __lowercase = TrainerState.load_from_json(Path(lowercase__ ,'''trainer_state.json''' ) ).log_history
            __lowercase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
            __lowercase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
            __lowercase = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        __lowercase , __lowercase , __lowercase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        __lowercase , __lowercase , __lowercase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        __lowercase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        __lowercase = gpu_peak_mem_orig + gpu_alloc_mem_orig
        __lowercase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        __lowercase = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        __lowercase = 1_2_0
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            lowercase__ ,lowercase__ ,'''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" ,)
        self.assertGreater(
            lowercase__ ,lowercase__ ,'''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" ,)
        self.assertEqual(
            lowercase__ ,lowercase__ ,F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
    # Build the run_translation.py CLI, then run it either in-process (argv patch)
    # or via torch.distributed.run; returns the output dir with trainer state.
    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : float = 3e-3 ,lowercase__ : str = "adafactor" ,lowercase__ : bool = False ,lowercase__ : str = None ,lowercase__ : int = 0 ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : int = None ,):
        __lowercase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        __lowercase = self.get_auto_remove_tmp_dir()
        # NOTE(review): the "72,892" lengths below look like obfuscation damage
        # (presumably originally "{max_len}"); confirm against VCS before running.
        __lowercase = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length 72,892\n --max_target_length 72,892\n --do_train\n --num_train_epochs {str(lowercase__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(lowercase__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
        __lowercase = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length 72,892\n --evaluation_strategy steps\n --eval_steps {str(lowercase__ )}\n ".split()
        __lowercase = '''
            --do_predict
        '''.split()
        __lowercase = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                __lowercase = get_gpu_count()
            __lowercase = get_torch_dist_unique_port()
            __lowercase = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
            __lowercase = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(lowercase__ ,env=self.get_env() )
        else:
            __lowercase = ['''run_translation.py'''] + args
            with patch.object(lowercase__ ,'''argv''' ,lowercase__ ):
                main()
        return output_dir
| 624 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase_ :
    """Config/input builder and per-head-model checks for TF Funnel tests.

    NOTE(review): identifiers were machine-mangled — every local is assigned
    to ``__lowercase`` and parameters are ``lowercase__`` — so attribute
    assignments in ``__init__`` and unpackings below reference names that no
    longer exist; consult VCS history before relying on runtime behaviour.
    """
    def __init__( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any]=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[Any]=True ,lowercase__ : str=True ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Tuple=9_9 ,lowercase__ : List[str]=[1, 1, 2] ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=3_2 ,lowercase__ : int=4 ,lowercase__ : Tuple=8 ,lowercase__ : Tuple=3_7 ,lowercase__ : str="gelu_new" ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[Any]=0.0 ,lowercase__ : Tuple=5_1_2 ,lowercase__ : Any=3 ,lowercase__ : List[str]=0.0_2 ,lowercase__ : List[Any]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : Tuple=False ,):
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_mask
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = block_sizes
        __lowercase = num_decoder_layers
        __lowercase = d_model
        __lowercase = n_head
        __lowercase = d_head
        __lowercase = d_inner
        __lowercase = hidden_act
        __lowercase = hidden_dropout
        __lowercase = attention_dropout
        __lowercase = activation_dropout
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = 2
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope
        __lowercase = initializer_std
        # Used in the tests to check the size of the first attention layer
        __lowercase = n_head
        # Used in the tests to check the size of the first hidden state
        __lowercase = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        __lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            __lowercase = self.num_hidden_layers + 2
    # Build a FunnelConfig plus random input ids/masks/labels for the checks below.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        __lowercase = None
        if self.use_input_mask:
            __lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase = None
        if self.use_token_type_ids:
            __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            __lowercase = ids_tensor([self.batch_size] ,self.num_choices )
        __lowercase = FunnelConfig(
            vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    # TFFunnelModel: accepts dict / list / tensor inputs; output keeps seq_length.
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,):
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        __lowercase = [input_ids, input_mask]
        __lowercase = model(lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
    # TFFunnelBaseModel: pooled output length varies (2 or 3) with config flags.
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,):
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        __lowercase = [input_ids, input_mask]
        __lowercase = model(lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : List[str] ,):
        __lowercase = TFFunnelForPreTraining(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,):
        __lowercase = TFFunnelForMaskedLM(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,):
        __lowercase = self.num_labels
        __lowercase = TFFunnelForSequenceClassification(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    # Multiple choice: inputs are tiled to (batch, num_choices, seq).
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,):
        __lowercase = self.num_choices
        __lowercase = TFFunnelForMultipleChoice(config=lowercase__ )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,):
        __lowercase = self.num_labels
        __lowercase = TFFunnelForTokenClassification(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Any ,):
        __lowercase = TFFunnelForQuestionAnswering(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    # Repack prepare_config_and_inputs() into the (config, inputs_dict) shape
    # expected by the common model tests.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        __lowercase = self.prepare_config_and_inputs()
        (
            (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) ,
        ) = config_and_inputs
        __lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
    """Common-test harness for the full (decoder) TF Funnel model variants.

    NOTE(review): identifiers were machine-mangled; the tester/config-tester
    attribute names assigned to ``__lowercase`` no longer exist — confirm
    against VCS history.
    """
    SCREAMING_SNAKE_CASE : int = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Any = False
    def SCREAMING_SNAKE_CASE ( self : str ):
        __lowercase = TFFunnelModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : str ):
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : int ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
    """Common-test harness for the encoder-only (base) TF Funnel variants.

    NOTE(review): identifiers were machine-mangled; the tester attributes
    assigned to ``__lowercase`` no longer exist — confirm against VCS history.
    """
    SCREAMING_SNAKE_CASE : List[Any] = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    SCREAMING_SNAKE_CASE : Dict = False
    SCREAMING_SNAKE_CASE : List[str] = False
    def SCREAMING_SNAKE_CASE ( self : str ):
        __lowercase = TFFunnelModelTester(self ,base=lowercase__ )
        __lowercase = ConfigTester(self ,config_class=lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
| 624 | 1 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowerCAmelCase__ = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
lowerCAmelCase__ = logging.WARNING
def _A ( ):
    """Return the default log level, honouring the DATASETS_VERBOSITY env var.

    NOTE(review): obfuscation collapsed identifiers — ``env_level_str`` below
    presumably refers to the value assigned to ``__lowercase``, and
    ``log_levels`` / ``_default_log_level`` are module globals whose names
    were likewise mangled (to ``lowerCAmelCase__``); confirm against VCS.
    """
    __lowercase = os.getenv('''DATASETS_VERBOSITY''' , A__ )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                F"has to be one of: { ', '.join(log_levels.keys() ) }" )
    return _default_log_level
def _A ( ):
"""simple docstring"""
return __name__.split('''.''' )[0]
def _A ( ):
    """Return the library's root logger.

    NOTE(review): ``_get_library_name`` is a mangled helper name that no
    longer exists in this module (every function here is ``_A``) — confirm
    against VCS before running.
    """
    return logging.getLogger(_get_library_name() )
def _A ( ):
    """Configure the root logger with the default verbosity level."""
    __lowercase = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _A ( ):
    """Reset the root logger's level to NOTSET (inherit from ancestors)."""
    __lowercase = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def _A ( A__ = None ):
    """Return a named logger; defaults to the library root logger's name."""
    if name is None:
        __lowercase = _get_library_name()
    return logging.getLogger(A__ )
def _A ( ):
    """Return the effective verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()
def _A ( A__ ):
    """Set the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(A__ )
def _A ( ):
    """Convenience shortcut: set verbosity to a fixed level.

    NOTE(review): obfuscation replaced the level constant with ``A__`` (an
    undefined name) in all four shortcuts below — presumably DEBUG / INFO /
    WARNING / ERROR respectively; confirm against VCS.
    """
    return set_verbosity(A__ )
def _A ( ):
    """Convenience shortcut: set verbosity to a fixed level (see note above its sibling)."""
    return set_verbosity(A__ )
def _A ( ):
    """Convenience shortcut: set verbosity to a fixed level."""
    return set_verbosity(A__ )
def _A ( ):
    """Convenience shortcut: set verbosity to a fixed level."""
    return set_verbosity(A__ )
def _A ( ):
    """Disable propagation of library log records to ancestor loggers.

    NOTE(review): the assignment target was mangled to ``__lowercase`` —
    presumably ``_get_library_root_logger().propagate = False``; confirm.
    """
    __lowercase = False
def _A ( ):
    """Enable propagation of library log records to ancestor loggers.

    NOTE(review): presumably ``...propagate = True`` before mangling; confirm.
    """
    __lowercase = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowercase_ :
    """No-op stand-in for ``tqdm`` used when progress bars are disabled.

    Iterates the wrapped iterable and silently absorbs every other method call.
    NOTE(review): the ``__init__`` assignment target was mangled — presumably
    ``self._iterator = args[0] if args else None``; confirm against VCS.
    """
    def __init__( self : Optional[int] ,*lowercase__ : Any ,**lowercase__ : int ): # pylint: disable=unused-argument
        __lowercase = args[0] if args else None
    def __iter__( self : Any ):
        # Delegate iteration straight to the wrapped iterable.
        return iter(self._iterator )
    def __getattr__( self : Union[str, Any] ,lowercase__ : List[str] ):
        # Any tqdm method (update, close, set_description, ...) becomes a no-op.
        def empty_fn(*lowercase__ : Dict ,**lowercase__ : int ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self : Union[str, Any] ):
        # Support `with tqdm(...) as pbar:` usage.
        return self
    def __exit__( self : str ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : List[str] ):
        return
lowerCAmelCase__ = True
class lowercase_ :
    """Factory that dispatches to real ``tqdm`` or the no-op ``EmptyTqdm``.

    The choice depends on the module-level ``_tqdm_active`` flag and a
    per-call ``disable`` keyword.
    """
    def __call__( self : Optional[int] ,*lowercase__ : Union[str, Any] ,lowercase__ : List[Any]=False ,**lowercase__ : Optional[Any] ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*lowercase__ ,**lowercase__ )
        else:
            return EmptyTqdm(*lowercase__ ,**lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ,*lowercase__ : Any ,**lowercase__ : str ):
        # Forward set_lock to the real tqdm only when progress bars are active.
        __lowercase = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*lowercase__ ,**lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Forward get_lock to the real tqdm only when progress bars are active.
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
lowerCAmelCase__ = _tqdm_cls()
def _A ( ):
    """Return True if progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active )
def _A ( ):
    """Globally enable progress bars.

    NOTE(review): the assignment target was mangled to ``__lowercase`` —
    presumably ``_tqdm_active = True`` (the ``global`` statement supports
    this reading); confirm against VCS.
    """
    global _tqdm_active
    __lowercase = True
def _A ( ):
    """Globally disable progress bars.

    NOTE(review): presumably ``_tqdm_active = False`` before mangling; confirm.
    """
    global _tqdm_active
    __lowercase = False
| 624 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Args (positional, matching the ``__main__`` call below):
        tf_checkpoint_path: path to the TF checkpoint to read.
        config_file: JSON config describing the T5 architecture.
        pytorch_dump_path: directory to write the converted PyTorch model to.

    Bug fix: the previous version declared three parameters with the same
    name (a SyntaxError) and read names (``config``, ``model``,
    ``pytorch_dump_path``) that were never bound; the pipeline is
    build-config -> build-model -> load TF weights -> save.
    """
    config = TaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    # NOTE(review): ``parser``/``args``/``convert_tf_checkpoint_to_pytorch`` are
    # not bound under these names in this chunk (renamed to
    # ``lowerCAmelCase__``/``_A``) — verify before running.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 624 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowercase_ (lowerCamelCase__ ):
    """Processor combining a BLIP image processor, a main tokenizer and a
    Q-Former tokenizer into one InstructBLIP-style callable.

    NOTE(review): the constructor's three parameters all carry the placeholder
    name ``lowercase__`` and the body reads ``qformer_tokenizer``/``encoding``
    directly — transcription damage; treat names below as intent, not
    working code.
    """

    SCREAMING_SNAKE_CASE : Optional[int] = ['image_processor', 'tokenizer']
    SCREAMING_SNAKE_CASE : Union[str, Any] = 'BlipImageProcessor'
    SCREAMING_SNAKE_CASE : Optional[int] = 'AutoTokenizer'

    def __init__( self : Union[str, Any] ,lowercase__ : int ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ):
        super().__init__(lowercase__ ,lowercase__ )
        # add QFormer tokenizer
        __lowercase = qformer_tokenizer

    def __call__( self : Tuple ,lowercase__ : ImageInput = None ,lowercase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowercase__ : bool = True ,lowercase__ : Union[bool, str, PaddingStrategy] = False ,lowercase__ : Union[bool, str, TruncationStrategy] = None ,lowercase__ : Optional[int] = None ,lowercase__ : int = 0 ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[bool] = None ,lowercase__ : bool = False ,lowercase__ : bool = False ,lowercase__ : bool = False ,lowercase__ : bool = False ,lowercase__ : bool = False ,lowercase__ : bool = True ,lowercase__ : Optional[Union[str, TensorType]] = None ,**lowercase__ : Optional[int] ,):
        """Tokenize text with both tokenizers and preprocess images; returns a
        single ``BatchFeature`` (Q-Former ids/mask popped into separate keys)."""
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''' )
        __lowercase = BatchFeature()
        if text is not None:
            __lowercase = self.tokenizer(
                text=lowercase__ ,add_special_tokens=lowercase__ ,padding=lowercase__ ,truncation=lowercase__ ,max_length=lowercase__ ,stride=lowercase__ ,pad_to_multiple_of=lowercase__ ,return_attention_mask=lowercase__ ,return_overflowing_tokens=lowercase__ ,return_special_tokens_mask=lowercase__ ,return_offsets_mapping=lowercase__ ,return_token_type_ids=lowercase__ ,return_length=lowercase__ ,verbose=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ ,)
            encoding.update(lowercase__ )
            __lowercase = self.qformer_tokenizer(
                text=lowercase__ ,add_special_tokens=lowercase__ ,padding=lowercase__ ,truncation=lowercase__ ,max_length=lowercase__ ,stride=lowercase__ ,pad_to_multiple_of=lowercase__ ,return_attention_mask=lowercase__ ,return_overflowing_tokens=lowercase__ ,return_special_tokens_mask=lowercase__ ,return_offsets_mapping=lowercase__ ,return_token_type_ids=lowercase__ ,return_length=lowercase__ ,verbose=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ ,)
            __lowercase = qformer_text_encoding.pop('''input_ids''' )
            __lowercase = qformer_text_encoding.pop('''attention_mask''' )
        if images is not None:
            __lowercase = self.image_processor(lowercase__ ,return_tensors=lowercase__ )
            encoding.update(lowercase__ )
        return encoding

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,*lowercase__ : Any ,**lowercase__ : int ):
        """Forward to the main tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : List[Any] ):
        """Forward to the main tokenizer's ``decode``."""
        return self.tokenizer.decode(*lowercase__ ,**lowercase__ )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def SCREAMING_SNAKE_CASE ( self : int ):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        __lowercase = self.tokenizer.model_input_names
        __lowercase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : List[str] ):
        """Save the processor; the Q-Former tokenizer goes into its own subfolder."""
        if os.path.isfile(lowercase__ ):
            raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
        os.makedirs(lowercase__ ,exist_ok=lowercase__ )
        __lowercase = os.path.join(lowercase__ ,'''qformer_tokenizer''' )
        self.qformer_tokenizer.save_pretrained(lowercase__ )
        return super().save_pretrained(lowercase__ ,**lowercase__ )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,lowercase__ : Dict ,**lowercase__ : Any ):
        """Load the Q-Former tokenizer from its subfolder, then build the processor."""
        __lowercase = AutoTokenizer.from_pretrained(lowercase__ ,subfolder='''qformer_tokenizer''' )
        __lowercase = cls._get_arguments_from_pretrained(lowercase__ ,**lowercase__ )
        args.append(lowercase__ )
        return cls(*lowercase__ )
| 624 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _A ( A__ ):
    """argparse factory (used via ``set_defaults(func=...)``) building the
    download command from parsed CLI args.

    NOTE(review): reads ``args`` (the parameter is named ``A__``) and
    ``DownloadCommand`` (the class below appears under the placeholder name
    ``lowercase_``) — transcription damage; verify before running.
    """
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowercase_ (lowerCamelCase__ ):
    """CLI command that pre-downloads a model and its tokenizer into the cache.

    NOTE(review): ``register_subcommand`` reads ``parser``/``download_parser``
    while its only parameter is named ``lowercase__``, and ``__init__`` reads
    ``model``/``cache``/``force``/``trust_remote_code`` — transcription
    damage; the intent is the standard ``download`` subcommand.
    """

    @staticmethod
    def SCREAMING_SNAKE_CASE ( lowercase__ : ArgumentParser ):
        """Register the ``download`` subcommand and its flags."""
        __lowercase = parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''' ,type=lowercase__ ,default=lowercase__ ,help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''' ,action='''store_true''' ,help='''Force the model to be download even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''' ,action='''store_true''' ,help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' ,)
        download_parser.add_argument('''model''' ,type=lowercase__ ,help='''Name of the model to download''' )
        download_parser.set_defaults(func=lowercase__ )

    def __init__( self : str ,lowercase__ : str ,lowercase__ : str ,lowercase__ : bool ,lowercase__ : bool ):
        # Intended fields: model id, cache dir, force-download flag,
        # trust-remote-code flag.
        __lowercase = model
        __lowercase = cache
        __lowercase = force
        __lowercase = trust_remote_code

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Execute the command: download model + tokenizer (populates the cache)."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
| 624 | 1 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowerCAmelCase__ = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _A ( A__ , A__ , A__ , A__ , A__=False , A__=True ):
    """Convert one PyTorch checkpoint into a TF2 weights file.

    Positional intent (from the ``__main__`` plumbing): model_type,
    pytorch_checkpoint_path, config_file, tf_dump_path,
    compare_with_pt_model=False, use_cached_models=True.

    NOTE(review): the six parameters share the name ``A__`` (a SyntaxError as
    written) and most names read below (``model_type``, ``config_file``,
    ``aws_config_map``, ``config``, ``tf_model``, ``diff`` ...) are never
    bound in this scope — transcription damage; treat the body as intent
    documentation rather than runnable code.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
    __lowercase , __lowercase , __lowercase , __lowercase = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        __lowercase = cached_file(A__ , A__ , force_download=not use_cached_models )
    __lowercase = config_class.from_json_file(A__ )
    __lowercase = True
    __lowercase = True
    print(F"Building TensorFlow model from configuration: {config}" )
    __lowercase = model_class(A__ )
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        __lowercase = cached_file(
            A__ , A__ , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    __lowercase = load_pytorch_checkpoint_in_tfa_model(A__ , A__ )
    if compare_with_pt_model:
        # Run both frameworks on dummy inputs and compare max abs difference.
        __lowercase = tf_model(tf_model.dummy_inputs , training=A__ )  # build the network
        __lowercase = torch.load(A__ , map_location='''cpu''' )
        __lowercase = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=A__ , config=A__ , state_dict=A__ )
        with torch.no_grad():
            __lowercase = pt_model(**pt_model.dummy_inputs )
        __lowercase = pto[0].numpy()
        __lowercase = tfo[0].numpy()
        __lowercase = np.amax(np.abs(np_pt - np_tf ) )
        print(F"Max absolute difference between models outputs {diff}" )
        assert diff <= 2e-2, F"Error, model absolute difference is >2e-2: {diff}"
    # Save pytorch-model
    print(F"Save TensorFlow model to {tf_dump_path}" )
    tf_model.save_weights(A__ , save_format='''h5''' )
def _A ( A__ , A__ , A__=None , A__=None , A__=False , A__=False , A__=False , A__=False , ):
    """Batch-convert PyTorch checkpoints to TF2 across model types/shortcuts.

    Positional intent: args_model_type, tf_dump_path,
    model_shortcut_names_or_path=None, config_shortcut_names_or_path=None,
    compare_with_pt_model=False, use_cached_models=False,
    remove_cached_files=False, only_convert_finetuned_models=False.

    NOTE(review): parameters share the name ``A__`` (SyntaxError as written)
    and the body reads the original pre-rename names — transcription damage;
    treat as intent documentation.
    """
    if args_model_type is None:
        __lowercase = list(MODEL_CLASSES.keys() )
    else:
        __lowercase = [args_model_type]
    for j, model_type in enumerate(A__ , start=1 ):
        print('''=''' * 100 )
        print(F" Converting model type {j}/{len(A__ )}: {model_type}" )
        print('''=''' * 100 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            __lowercase = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            __lowercase = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(A__ , A__ ) , start=1 ):
            print('''-''' * 100 )
            # Fine-tuned checkpoints are skipped unless explicitly requested
            # (and vice versa when only_convert_finetuned_models is set).
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(F" Skipping finetuned checkpoint {model_shortcut_name}" )
                    continue
                __lowercase = model_shortcut_name
            elif only_convert_finetuned_models:
                print(F" Skipping not finetuned checkpoint {model_shortcut_name}" )
                continue
            print(
                F" Converting checkpoint {i}/{len(A__ )}: {model_shortcut_name} - model_type {model_type}" )
            print('''-''' * 100 )
            if config_shortcut_name in aws_config_map:
                __lowercase = cached_file(A__ , A__ , force_download=not use_cached_models )
            else:
                __lowercase = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                __lowercase = cached_file(A__ , A__ , force_download=not use_cached_models )
            else:
                __lowercase = model_shortcut_name
            if os.path.isfile(A__ ):
                __lowercase = '''converted_model'''
            convert_pt_checkpoint_to_tf(
                model_type=A__ , pytorch_checkpoint_path=A__ , config_file=A__ , tf_dump_path=os.path.join(A__ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=A__ , )
            if remove_cached_files:
                os.remove(A__ )
                os.remove(A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
f'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
lowerCAmelCase__ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 624 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase__ = ['''gpt2''']
lowerCAmelCase__ = '''gpt2'''
if is_tf_available():
class lowercase_ (tf.Module ):
    """tf.Module bundling an in-graph GPT-2 tokenizer with a GPT-2 LM head
    model so both can be exported in a single SavedModel.

    NOTE(review): ``__init__`` reads ``tokenizer`` but its parameter is the
    placeholder ``lowercase__`` — transcription damage; verify before running.
    """

    def __init__( self : List[str] ,lowercase__ : Tuple ):
        super().__init__()
        __lowercase = tokenizer
        __lowercase = AutoConfig.from_pretrained(lowercase__ )
        __lowercase = TFGPTaLMHeadModel.from_config(lowercase__ )

    @tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='''text''' ),) )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ):
        """Serving fn: tokenize raw strings, densify ids, build an attention
        mask from non-zero ids, run the LM, and return the logits."""
        __lowercase = self.tokenizer(lowercase__ )
        __lowercase = tokenized['''input_ids'''].to_tensor()
        __lowercase = tf.cast(input_ids_dense > 0 ,tf.intaa )
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        __lowercase = self.model(input_ids=lowercase__ ,attention_mask=lowercase__ )['''logits''']
        return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase ):
    """Tests comparing the in-graph TF GPT-2 tokenizer against the reference
    Python tokenizer, plus graph-mode, SavedModel, config round-trip and
    max_length checks.

    NOTE(review): several locals below (``tokenized``, ``python_outputs`` ...)
    read pre-rename names while assignments target ``__lowercase`` —
    transcription damage; treat bodies as intent documentation.
    """

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # setUp: build paired Python / TF tokenizers and multilingual samples.
        super().setUp()
        __lowercase = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __lowercase = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        __lowercase = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        __lowercase = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Eager equivalence: TF tokenizer output must match Python tokenizer.
        for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                __lowercase = tokenizer([test_inputs] ,return_tensors='''tf''' )
                __lowercase = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    __lowercase = python_outputs[key].numpy()
                    __lowercase = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(lowercase__ ,tf.intaa ) == tf_outputs_values ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Graph mode: tf.function-compiled tokenizer must equal eager output.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = tf.function(lowercase__ )
            for test_inputs in self.test_sentences:
                __lowercase = tf.constant(lowercase__ )
                __lowercase = compiled_tokenizer(lowercase__ )
                __lowercase = tf_tokenizer(lowercase__ )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        # SavedModel round-trip: export and reload, outputs must be identical.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = ModelToSave(tokenizer=lowercase__ )
            __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
            __lowercase = model.serving(lowercase__ )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                __lowercase = Path(lowercase__ ) / '''saved.model'''
                tf.saved_model.save(lowercase__ ,lowercase__ ,signatures={'''serving_default''': model.serving} )
                __lowercase = tf.saved_model.load(lowercase__ )
                __lowercase = loaded_model.signatures['''serving_default'''](lowercase__ )['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Config round-trip: get_config -> from_config must preserve behavior.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
            __lowercase = tf_tokenizer(lowercase__ )  # Build model with some sample inputs
            __lowercase = tf_tokenizer.get_config()
            __lowercase = TFGPTaTokenizer.from_config(lowercase__ )
            __lowercase = model_from_config(lowercase__ )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # max_length: output sequence length must equal the requested cap.
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            __lowercase = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
                __lowercase = tf_tokenizer(lowercase__ ,max_length=lowercase__ )
                __lowercase = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 624 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowerCAmelCase__ = False
try:
    # Detect whether we are running inside Google Colab; the menu falls back
    # to plain numeric input there (no raw-terminal key handling).
    lowerCAmelCase__ = _is_package_available('''google.colab''')
except ModuleNotFoundError:
    pass
@input.register
class lowercase_ :
    """Interactive terminal bullet menu: arrow keys / digit keys move the
    highlight, enter confirms, Ctrl-C raises KeyboardInterrupt.

    NOTE(review): ``__init__``'s two parameters share the placeholder name
    ``lowercase__`` while the body reads ``choices``/``prompt``, and several
    methods read pre-rename locals — transcription damage. Also note the
    mutable default ``[]``.
    """

    def __init__( self : str ,lowercase__ : str = None ,lowercase__ : list = [] ):
        # Intended fields: position (current highlight), choices, prompt,
        # arrow_char (platform-dependent marker).
        __lowercase = 0
        __lowercase = choices
        __lowercase = prompt
        if sys.platform == "win32":
            __lowercase = '''*'''
        else:
            __lowercase = '''➔ '''

    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : str = "" ):
        """Write the choice at ``index`` (green via writeColor off-Windows)."""
        if sys.platform != "win32":
            writeColor(self.choices[index] ,3_2 ,lowercase__ )
        else:
            forceWrite(self.choices[index] ,lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : int ):
        """Print one menu row, prefixing the arrow for the current position."""
        if index == self.position:
            forceWrite(F" {self.arrow_char} " )
            self.write_choice(lowercase__ )
        else:
            forceWrite(F" {self.choices[index]}" )
        reset_cursor()

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Direction ,lowercase__ : int = 1 ):
        """Move the highlight up/down by ``num_spaces`` rows, redrawing both
        the old and the new row; no-op at the list boundaries."""
        __lowercase = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(lowercase__ )
        move_cursor(lowercase__ ,direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP['''up'''] )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP['''down'''] )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP['''newline'''] )
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Enter: move the cursor below the menu and return the selected index.
        move_cursor(len(self.choices ) - self.position ,'''DOWN''' )
        return self.position

    @input.mark(KEYMAP['''interrupt'''] )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Ctrl-C: restore the cursor below the menu, then propagate.
        move_cursor(len(self.choices ) - self.position ,'''DOWN''' )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(lowercase__ )] for number in range(1_0 )] )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Digit keys 0-9: jump directly to the typed index when valid.
        __lowercase = int(chr(self.current_selection ) )
        __lowercase = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP ,-movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN ,lowercase__ )
            else:
                return
        else:
            return

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int = 0 ):
        """Run the menu loop and return the chosen index; in Colab, fall back
        to plain numeric input instead of raw key handling."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt ,'''\n''' )
            if in_colab:
                forceWrite('''Please input a choice index (starting from 0), and press enter''' ,'''\n''' )
            else:
                forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' ,'''\n''' )
        __lowercase = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(lowercase__ )
            forceWrite('''\n''' )
        move_cursor(len(self.choices ) - self.position ,'''UP''' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        __lowercase = int(builtins.input() )
                    except ValueError:
                        __lowercase = default_choice
                else:
                    __lowercase = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 ,'''UP''' )
                        clear_line()
                    self.write_choice(lowercase__ ,'''\n''' )
                    return choice
| 624 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_660_254])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _A ( initial_vectors , steps ):
    """Run ``steps`` Koch-snowflake subdivision rounds over a vector list.

    Args:
        initial_vectors: list of 2-D numpy vectors describing the start curve.
        steps: number of subdivision iterations to apply.

    Returns:
        The list of vectors after ``steps`` applications of iteration_step.

    Bug fix: the previous version declared both parameters with the same name
    (a SyntaxError) and discarded each round's result into a throwaway local
    while returning an unbound name ``vectors``.
    """
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def _A ( vectors ):
    """Perform one Koch subdivision: replace each segment by four segments
    (the middle third is replaced by two sides of an equilateral bump).

    Bug fix: the previous version's parameter and accumulator had been
    renamed away, so every read (``vectors``, ``new_vectors``,
    ``end_vector`` ...) was unbound; restored the canonical algorithm.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        # First third, then the 60-degree "bump" vertex, then the second third.
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = numpy.radians(A__ )
__lowercase , __lowercase = numpy.cos(A__ ), numpy.sin(A__ )
__lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(A__ , A__ )
def _A ( A__ ):
    """Render the vector list with matplotlib as a connected polyline with an
    equal-aspect axis.

    NOTE(review): the tuple unpack binds both coordinate sequences to the same
    throwaway name and then plots the raw input against itself — transcription
    damage; the intent is ``plot(x_coordinates, y_coordinates)``. ``axes`` is
    likewise unbound (should be the ``plt.gca()`` result).
    """
    __lowercase = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    __lowercase , __lowercase = zip(*A__ )
    plt.plot(A__ , A__ )
    plt.show()
if __name__ == "__main__":
    # Run the doctests, then compute 5 Koch iterations and draw the result.
    import doctest

    doctest.testmod()
    # NOTE(review): ``iterate``/``plot``/``processed_vectors`` are not bound
    # under these names in this chunk (renamed to ``_A``/``lowerCAmelCase__``)
    # — verify before running.
    lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 624 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
lowerCAmelCase__ = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {f'funnel-transformer/{name}': 512 for name in _model_names}
lowerCAmelCase__ = {f'funnel-transformer/{name}': {'''do_lower_case''': True} for name in _model_names}
class lowercase_ (lowerCamelCase__ ):
    """Fast (Rust-backed) WordPiece tokenizer for Funnel Transformer checkpoints.

    Funnel assigns token-type id 2 (``cls_token_type_id`` below) to the leading
    [CLS] token instead of 0, which is reflected in
    ``create_token_type_ids_from_sequences``.

    NOTE(review): several signatures below repeat the placeholder name
    ``lowercase__`` for every parameter — a SyntaxError from an automated
    rename. The intended parameter names can be read off the
    ``super().__init__`` call (vocab_file, tokenizer_file, do_lower_case,
    unk_token, sep_token, pad_token, cls_token, mask_token, bos_token,
    eos_token, clean_text, tokenize_chinese_chars, strip_accents,
    wordpieces_prefix). Some locals are likewise assigned to ``__lowercase``
    but read back under their original names (``normalizer_state``,
    ``normalizer_class``, ``output``) — restore them before running.
    """

    SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : int = PRETRAINED_INIT_CONFIGURATION
    # Matching slow tokenizer class.
    SCREAMING_SNAKE_CASE : List[str] = FunnelTokenizer
    SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Token-type id used for the [CLS] token (Funnel uses 2, not 0).
    SCREAMING_SNAKE_CASE : int = 2

    def __init__( self : str ,lowercase__ : List[str]=None ,lowercase__ : Dict=None ,lowercase__ : Optional[int]=True ,lowercase__ : Optional[int]="<unk>" ,lowercase__ : List[Any]="<sep>" ,lowercase__ : List[Any]="<pad>" ,lowercase__ : str="<cls>" ,lowercase__ : List[str]="<mask>" ,lowercase__ : Any="<s>" ,lowercase__ : str="</s>" ,lowercase__ : int=True ,lowercase__ : Dict=True ,lowercase__ : int=None ,lowercase__ : Dict="##" ,**lowercase__ : Optional[Any] ,):
        super().__init__(
            lowercase__ ,tokenizer_file=lowercase__ ,do_lower_case=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,pad_token=lowercase__ ,cls_token=lowercase__ ,mask_token=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ ,clean_text=lowercase__ ,tokenize_chinese_chars=lowercase__ ,strip_accents=lowercase__ ,wordpieces_prefix=lowercase__ ,**lowercase__ ,)
        # Re-sync the backend normalizer if the serialized tokenizer's options
        # disagree with the arguments passed in.
        __lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' ,lowercase__ ) != do_lower_case
            or normalizer_state.get('''strip_accents''' ,lowercase__ ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' ,lowercase__ ) != tokenize_chinese_chars
        ):
            __lowercase = getattr(lowercase__ ,normalizer_state.pop('''type''' ) )
            __lowercase = do_lower_case
            __lowercase = strip_accents
            __lowercase = tokenize_chinese_chars
            __lowercase = normalizer_class(**lowercase__ )
        __lowercase = do_lower_case

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : int=None ):
        # Build model input with special tokens: [CLS] A [SEP] (+ B [SEP]).
        # NOTE(review): reads `token_ids_a` / `output` that the mangled
        # assignments above never bind.
        __lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Segment ids: cls_token_type_id (2) for [CLS], 0 for the first
        # sequence + [SEP], 1 for the optional second sequence + [SEP].
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
        # Persist the vocabulary files via the backend tokenizer model.
        __lowercase = self._tokenizer.model.save(lowercase__ ,name=lowercase__ )
        return tuple(lowercase__ )
| 624 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure for the ViT-MAE model family: heavy torch/TF modules
# are only imported on first attribute access via `_LazyModule`.
lowerCAmelCase__ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch not installed — skip registering the torch model classes.
    pass
else:
    # NOTE(review): this list presumably belongs inside the import-structure
    # dict (e.g. _import_structure['modeling_vit_mae'] = [...]); as written it
    # only rebinds the module-level placeholder name.
    lowerCAmelCase__ = [
        '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMAEForPreTraining''',
        '''ViTMAELayer''',
        '''ViTMAEModel''',
        '''ViTMAEPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow not installed — skip the TF model classes.
    pass
else:
    lowerCAmelCase__ = [
        '''TFViTMAEForPreTraining''',
        '''TFViTMAEModel''',
        '''TFViTMAEPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    # At runtime the module object is replaced by a lazy proxy.
    # NOTE(review): `_import_structure` is read here but never bound above
    # (the dict was assigned to the placeholder name) — restore before running.
    import sys

    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The Bort conversion depends on exact gluonnlp/mxnet versions — fail fast.
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
    raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
    raise Exception('''requires mxnet == 1.5.0''')

logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
# Sentence used at the end of the conversion to compare the two models' outputs.
lowerCAmelCase__ = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def _A ( A__ , A__ ):
    """Convert an official Gluonnlp Bort checkpoint into a Hugging Face
    ``BertForMaskedLM`` checkpoint and verify the two models' outputs agree.

    NOTE(review): both parameters are named ``A__`` — a SyntaxError from an
    automated rename; per the call site they are
    ``(bort_checkpoint_path, pytorch_dump_folder_path)``. Many locals are
    likewise assigned to ``__lowercase`` but read back under their original
    names (``predefined_args``, ``original_bort``, ``params``, ``tokenizer``
    …), and several argument placeholders (``output_attention=A__`` etc.)
    lost their original values — restore them before running.
    """
    # Hyper-parameters of the published bort_4_8_768_1024 architecture.
    __lowercase = {
        '''attention_cell''': '''multi_head''',
        '''num_layers''': 4,
        '''units''': 1024,
        '''hidden_size''': 768,
        '''max_length''': 512,
        '''num_heads''': 8,
        '''scaled''': True,
        '''dropout''': 0.1,
        '''use_residual''': True,
        '''embed_size''': 1024,
        '''embed_dropout''': 0.1,
        '''word_embed''': None,
        '''layer_norm_eps''': 1e-5,
        '''token_type_vocab_size''': 2,
    }
    __lowercase = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    __lowercase = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=A__ , output_all_encodings=A__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , A__ ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    __lowercase = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    __lowercase = os.path.join(get_home_dir() , '''models''' )
    __lowercase = _load_vocab(A__ , A__ , A__ , cls=A__ )
    __lowercase = nlp.model.BERTModel(
        A__ , len(A__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=A__ , use_token_type_embed=A__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=A__ , use_decoder=A__ , )
    original_bort.load_parameters(A__ , cast_dtype=A__ , ignore_extra=A__ )
    __lowercase = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    __lowercase = {
        '''architectures''': ['''BertForMaskedLM'''],
        '''attention_probs_dropout_prob''': predefined_args['''dropout'''],
        '''hidden_act''': '''gelu''',
        '''hidden_dropout_prob''': predefined_args['''dropout'''],
        '''hidden_size''': predefined_args['''embed_size'''],
        '''initializer_range''': 0.0_2,
        '''intermediate_size''': predefined_args['''hidden_size'''],
        '''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
        '''max_position_embeddings''': predefined_args['''max_length'''],
        '''model_type''': '''bort''',
        '''num_attention_heads''': predefined_args['''num_heads'''],
        '''num_hidden_layers''': predefined_args['''num_layers'''],
        '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
        '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(A__ ),
    }
    __lowercase = BertConfig.from_dict(A__ )
    __lowercase = BertForMaskedLM(A__ )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(A__ ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(A__ , A__ ):
        __lowercase = hf_param.shape
        __lowercase = to_torch(params[gluon_param] )
        __lowercase = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param

    # Embedding weights.
    __lowercase = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    __lowercase = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    __lowercase = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    __lowercase = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    __lowercase = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    # Per-layer weights.
    for i in range(hf_bort_config.num_hidden_layers ):
        __lowercase = hf_bort_model.bert.encoder.layer[i]
        # self attention
        __lowercase = layer.attention.self
        __lowercase = check_and_map_params(
            self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        __lowercase = check_and_map_params(
            self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        __lowercase = check_and_map_params(
            self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        __lowercase = check_and_map_params(
            self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        __lowercase = check_and_map_params(
            self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        __lowercase = check_and_map_params(
            self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        __lowercase = layer.attention.output
        __lowercase = check_and_map_params(
            self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
        __lowercase = check_and_map_params(
            self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
        __lowercase = check_and_map_params(
            self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
        __lowercase = check_and_map_params(
            self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        __lowercase = layer.intermediate
        __lowercase = check_and_map_params(
            intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        __lowercase = check_and_map_params(
            intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        __lowercase = layer.output
        __lowercase = check_and_map_params(
            bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        __lowercase = check_and_map_params(
            bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        __lowercase = check_and_map_params(
            bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        __lowercase = check_and_map_params(
            bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    __lowercase = RobertaTokenizer.from_pretrained('''roberta-base''' )
    __lowercase = tokenizer.encode_plus(A__ )['''input_ids''']
    # Get gluon output
    __lowercase = mx.nd.array([input_ids] )
    __lowercase = original_bort(inputs=A__ , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(A__ )
    __lowercase = BertModel.from_pretrained(A__ )
    hf_bort_model.eval()
    __lowercase = tokenizer.encode_plus(A__ , return_tensors='''pt''' )
    __lowercase = hf_bort_model(**A__ )[0]
    __lowercase = output_gluon[0].asnumpy()
    __lowercase = output_hf[0].detach().numpy()
    __lowercase = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    __lowercase = np.allclose(A__ , A__ , atol=1e-3 )
    if success:
        print('''✔️ Both model do output the same tensors''' )
    else:
        print('''❌ Both model do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , A__ )
if __name__ == "__main__":
    # Command-line entry point for the Bort -> PyTorch conversion.
    # Fixed from review: `parser`, `args` and `convert_bort_checkpoint_to_pytorch`
    # were undefined names (the parser/namespace were bound to a placeholder and
    # the conversion routine above is named `_A`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    _A(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 624 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
lowerCAmelCase__ = (720, 1280) # Height, Width
lowerCAmelCase__ = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowerCAmelCase__ = 1 / 100
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = 250
def _A ( ):
    """Driver: build NUMBER_IMAGES mosaic images from the configured dataset and
    write each image plus its YOLO-format (.txt) labels to OUTPUT_DIR.

    NOTE(review): the body reads the undefined placeholder ``A__`` where the
    real arguments (the dataset dirs, sampled indices, OUTPUT_SIZE,
    SCALE_RANGE, FILTER_TINY_SCALE, …) should appear, and the helpers it calls
    (`get_dataset`, `update_image_and_anno`, `random_chars`) are not resolvable
    under those names in this file. Restore the bindings before running.
    """
    __lowercase , __lowercase = get_dataset(A__ , A__ )
    for index in range(A__ ):
        # Pick four random source images for one mosaic.
        __lowercase = random.sample(range(len(A__ ) ) , 4 )
        __lowercase , __lowercase , __lowercase = update_image_and_anno(
            A__ , A__ , A__ , A__ , A__ , filter_scale=A__ , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __lowercase = random_chars(32 )
        __lowercase = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __lowercase = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(F"{file_root}.jpg" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        # Convert corner boxes back to YOLO (center, size) label lines.
        __lowercase = []
        for anno in new_annos:
            __lowercase = anno[3] - anno[1]
            __lowercase = anno[4] - anno[2]
            __lowercase = anno[1] + width / 2
            __lowercase = anno[2] + height / 2
            __lowercase = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(A__ )
        with open(F"{file_root}.txt" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = []
for label_file in glob.glob(os.path.join(A__ , '''*.txt''' ) ):
__lowercase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(A__ ) as in_file:
__lowercase = in_file.readlines()
__lowercase = os.path.join(A__ , F"{label_name}.jpg" )
__lowercase = []
for obj_list in obj_lists:
__lowercase = obj_list.rstrip('''\n''' ).split(''' ''' )
__lowercase = float(obj[1] ) - float(obj[3] ) / 2
__lowercase = float(obj[2] ) - float(obj[4] ) / 2
__lowercase = float(obj[1] ) + float(obj[3] ) / 2
__lowercase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def _A ( all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    """Build one mosaic image from four sources and remap their boxes.

    Fixed from review: the original signature repeated the placeholder name
    ``A__`` for every parameter (a SyntaxError), used the nonexistent dtype
    ``np.uinta``, and assigned each resized quadrant to a throwaway local
    instead of pasting it into the output canvas.

    Args:
        all_img_list: list of image file paths.
        all_annos: per-image lists of ``[class, xmin, ymin, xmax, ymax]``
            boxes in relative corner coordinates.
        idxs: four indices into the lists above, in order top-left,
            top-right, bottom-left, bottom-right.
        output_size: ``(height, width)`` of the mosaic canvas.
        scale_range: ``(lo, hi)`` range for the random split-point position.
        filter_scale: drop remapped boxes whose width or height is not
            strictly greater than this value (0.0 keeps everything).

    Returns:
        ``(output_img, new_anno, first_path)``: the mosaic image, the
        remapped boxes, and the path of the first source image.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    # Random split point dividing the canvas into four quadrants.
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def _A ( A__ ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__lowercase = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the mosaic driver
    # above lost its name during an automated rename; restore a stable name
    # for it before running this script.
    main()
    print('''DONE ✅''')
| 624 | 1 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
__lowercase = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _A ( A__ ):
"""simple docstring"""
__lowercase = 0
while number > 0:
__lowercase = number % 10
sum_of_digits += last_digit
__lowercase = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _A ( A__ = 100 ):
"""simple docstring"""
__lowercase = factorial(A__ )
__lowercase = split_and_add(A__ )
return result
if __name__ == "__main__":
    # Fixed from review: `solution` was an undefined name — the digit-sum
    # routine defined above is `_A`.
    print(_A(int(input('''Enter the Number: ''').strip())))
| 624 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# Name of the SentencePiece model file expected inside a checkpoint directory.
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.model'''}
# Download location of the vocabulary for the published checkpoint.
lowerCAmelCase__ = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}
# Maximum sequence length (positional-embedding size) per checkpoint.
lowerCAmelCase__ = {
    '''google/rembert''': 256,
}
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-based tokenizer for RemBERT checkpoints.

    NOTE(review): several signatures below repeat the placeholder name
    ``lowercase__`` for every parameter — a SyntaxError from an automated
    rename; the intended names can be read off the ``super().__init__`` call
    (vocab_file, do_lower_case, remove_space, keep_accents, bos_token,
    eos_token, unk_token, sep_token, pad_token, cls_token, mask_token).
    Several locals are likewise assigned to ``__lowercase`` but read back
    under their original names (``vocab``, ``state``, ``pieces``, …) —
    restore them before running.
    """

    SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : str ,lowercase__ : Optional[Any] ,lowercase__ : List[str]=False ,lowercase__ : Dict=True ,lowercase__ : List[str]=True ,lowercase__ : Dict="[CLS]" ,lowercase__ : Union[str, Any]="[SEP]" ,lowercase__ : List[str]="[UNK]" ,lowercase__ : int="[SEP]" ,lowercase__ : List[str]="[PAD]" ,lowercase__ : Optional[int]="[CLS]" ,lowercase__ : List[Any]="[MASK]" ,**lowercase__ : int ,):
        super().__init__(
            do_lower_case=lowercase__ ,remove_space=lowercase__ ,keep_accents=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,pad_token=lowercase__ ,cls_token=lowercase__ ,mask_token=lowercase__ ,**lowercase__ ,)
        __lowercase = do_lower_case
        __lowercase = remove_space
        __lowercase = keep_accents
        __lowercase = vocab_file
        # Load the SentencePiece model from disk.
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(lowercase__ )

    @property
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Vocabulary size as reported by the SentencePiece model.
        return len(self.sp_model )

    def SCREAMING_SNAKE_CASE ( self : str ):
        # token -> id mapping, including tokens added after loading.
        __lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : List[Any] ):
        # The SentencePiece processor is not picklable: drop it on pickling...
        __lowercase = self.__dict__.copy()
        __lowercase = None
        return state

    def __setstate__( self : str ,lowercase__ : Optional[int] ):
        # ...and reload it from `self.vocab_file` on unpickling.
        __lowercase = d
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ,lowercase__ : List[Any]=False ):
        # Tokenize raw text into SentencePiece pieces.
        __lowercase = self.sp_model.EncodeAsPieces(lowercase__ )
        return pieces

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ):
        # piece -> id
        return self.sp_model.PieceToId(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
        # id -> piece
        return self.sp_model.IdToPiece(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
        # Join pieces back into a plain string.
        __lowercase = self.sp_model.decode_pieces(lowercase__ )
        return out_string

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Build model input: [CLS] A [SEP] or [CLS] A [SEP] B [SEP].
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
        # Mask with 1 for special tokens and 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
        return [1] + ([0] * len(lowercase__ )) + [1]

    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
        # Copy the SentencePiece model file into the save directory.
        if not os.path.isdir(lowercase__ ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(lowercase__ ) )
            return
        __lowercase = os.path.join(
            lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file ,lowercase__ )
        return (out_vocab_file,)
| 624 | 1 |
'''simple docstring'''
def _A ( A__ = 50 ):
"""simple docstring"""
__lowercase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
    # Fixed from review: `solution` was an undefined name — the counting
    # routine defined above is `_A`.
    print(f'{_A() = }')
| 624 |
'''simple docstring'''
def _A ( A__ = 1000000 ):
"""simple docstring"""
__lowercase = set(range(3 , A__ , 2 ) )
primes.add(2 )
for p in range(3 , A__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , A__ , A__ ) ) )
__lowercase = [float(A__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A__ , limit + 1 , A__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=A__ , default=A__ , required=A__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=A__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=A__ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=A__ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=A__ , default=0 , help='''cuda_id.''' , )
__lowercase = parser.parse_args()
return args
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
if not len(A__ ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
__lowercase , __lowercase = imgs[0].size
__lowercase = Image.new('''RGB''' , size=(cols * w, rows * h) )
__lowercase , __lowercase = grid.size
for i, img in enumerate(A__ ):
grid.paste(A__ , box=(i % cols * w, i // cols * h) )
return grid
def _A ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """Generate images with the given diffusion pipeline and compose a grid.

    Fixed from review: the original signature repeated the placeholder name
    ``A__`` for every parameter (a SyntaxError; names restored from the call
    site and the keyword arguments below), and it called ``image_grid`` which
    is not resolvable under that name in this file — the grid is now composed
    inline with the same logic.

    Returns:
        ``(grid, images)``: the composite PIL grid and the generated images.
    """
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    _cols = num_images_per_prompt // _rows
    # Inline grid composition (same behavior as the standalone grid helper).
    if not len(images ) == _rows * _cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w, h = images[0].size
    grid = Image.new('''RGB''' , size=(_cols * w, _rows * h) )
    for i, img in enumerate(images ):
        grid.paste(img , box=(i % _cols * w, i // _cols * h) )
    return grid, images
# Script body: load the pipeline pieces, optionally swap in a quantized UNet
# produced by Intel Neural Compressor, generate images and save them.
# NOTE(review): several names read below (`parse_args`, `args`, `unet`,
# `pipeline`, `text_encoder`, `vae`, `tokenizer`, `grid`, `images`, `dirname`)
# are never bound — every assignment targets the placeholder
# `lowerCAmelCase__`; restore the real variable names before running.
lowerCAmelCase__ = parse_args()
# Load models and create wrapper for stable diffusion
lowerCAmelCase__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
lowerCAmelCase__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
lowerCAmelCase__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
lowerCAmelCase__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: return the images unchanged, flagged safe.
lowerCAmelCase__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    # A quantized UNet exists: load it with neural_compressor and swap it in.
    lowerCAmelCase__ = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, '''unet''', unet)
else:
    # No quantized model: move the fp32 UNet to the requested CUDA device.
    lowerCAmelCase__ = unet.to(torch.device('''cuda''', args.cuda_id))
lowerCAmelCase__ = pipeline.to(unet.device)
lowerCAmelCase__ , lowerCAmelCase__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the composite grid, named after the underscore-joined caption.
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
lowerCAmelCase__ = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
# Save the individual images as 1.png, 2.png, ...
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 624 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase__ : int ,lowercase__ : List[str]=None ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Dict ):
__lowercase = parent
__lowercase = config_class
__lowercase = has_text_modality
__lowercase = kwargs
__lowercase = common_properties
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.config_class(**self.inputs_dict )
__lowercase = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(lowercase__ ,lowercase__ ) ,msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(lowercase__ ):
try:
setattr(lowercase__ ,lowercase__ ,lowercase__ )
self.parent.assertEqual(
getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(lowercase__ ):
try:
__lowercase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.config_class(**self.inputs_dict )
__lowercase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = os.path.join(lowercase__ ,'''config.json''' )
config_first.to_json_file(lowercase__ )
__lowercase = self.config_class.from_json_file(lowercase__ )
self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(lowercase__ )
__lowercase = self.config_class.from_pretrained(lowercase__ )
self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.config_class(**self.inputs_dict )
__lowercase = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = os.path.join(lowercase__ ,lowercase__ )
config_first.save_pretrained(lowercase__ )
__lowercase = self.config_class.from_pretrained(lowercase__ ,subfolder=lowercase__ )
self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.config_class(**self.inputs_dict ,num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) ,5 )
self.parent.assertEqual(len(config.labelaid ) ,5 )
__lowercase = 3
self.parent.assertEqual(len(config.idalabel ) ,3 )
self.parent.assertEqual(len(config.labelaid ) ,3 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
if self.config_class.is_composition:
return
__lowercase = self.config_class()
self.parent.assertIsNotNone(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = copy.deepcopy(lowercase__ )
__lowercase = self.config_class(**lowercase__ )
__lowercase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
elif getattr(lowercase__ ,lowercase__ ) != value:
wrong_values.append((key, getattr(lowercase__ ,lowercase__ ), value) )
if len(lowercase__ ) > 0:
__lowercase = '''\n'''.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 624 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map one key of the original EfficientFormer state dict to its name in
    the HF ``EfficientFormer`` implementation.

    Args:
        old_name: key from the original checkpoint.
        num_meta4D_last_stage: number of meta-4D blocks in the last stage;
            network indices below it stay meta4D, the rest become meta3D.

    Returns:
        The renamed key.
    """
    # Fix: every intermediate (`layer`, `match`, `trimmed_name`, `new_name`,
    # `layer_index`, …) was assigned to a throwaway name while later lines read
    # the intended names, so the function raised NameError on first use.
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Block indices may be one or two digits; match "<stage>.<block>." either way.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                # meta3D blocks use layernorms and linear layers instead of
                # batchnorms and convolutions.
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    # Remaining (meta4D) norms/fcs map onto batchnorms/convolutions.
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of *checkpoint* in place (via :func:`rename_key`) and
    return the same dict.

    Fix: the popped value was discarded and the renamed key was never written
    back, so the checkpoint ended up empty.
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    # Fix: the URL was bound to a throwaway name and the undefined `A__` was
    # passed both as the URL and as `stream=`; `stream=True` keeps the raw
    # response readable by PIL.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to the HF format, verify
    the preprocessing and logits, save the result and optionally push to hub.

    Fix: virtually every local (`config`, `model`, `processor`, `logits`, …)
    was assigned to a throwaway name while later lines read the intended names.
    Parameter names are those used by the ``__main__`` call site.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # `num_metaad_blocks` was the digit-mangled `num_meta3d_blocks` config field.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline, used to verify the HF processor matches
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to a throwaway constant while
    # the following lines read `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    # Pushing is opt-out: --no-push_to_hub disables it.
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 624 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if *phone* is a valid Sri Lankan mobile number.

    Accepted prefixes: ``0``, ``94``, ``+94`` or ``0094``, followed by a
    mobile code ``7x`` (x in 0,1,2,4,5,6,7,8), an optional ``-`` or space,
    and exactly seven digits.
    """
    # Fix: the compiled pattern was bound to a throwaway name and the search
    # received the undefined `A__`; name restored to what __main__ calls.
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    # Fix: the sample number was bound to a throwaway constant while the call
    # below reads `phone`.
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 624 | 1 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase_(object):
    """Shared helper that exercises the common behaviour of a config class:
    property getters/setters, JSON round-trips, save/load round-trips,
    label-count handling and keyword-argument initialization.

    Fixes: ``__init__`` had five parameters all named ``lowercase__``
    (a SyntaxError); the base ``lowerCamelCase__`` is defined nowhere in this
    file (``object`` substituted); every method was mangled to the same name
    so only the last survived; and most locals were assigned to a throwaway
    name while later lines read the intended names.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        # parent is the unittest.TestCase that supplies the assert* methods.
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        # Remaining keyword arguments are the canonical constructor inputs.
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Check the standard properties exist as getters, setters and kwargs."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """Round-trip the config through ``to_json_string``."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """Round-trip the config through a JSON file on disk."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """Round-trip the config through ``save_pretrained``/``from_pretrained``."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """Same round-trip, saving into and loading from a subfolder."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """``num_labels`` must drive the sizes of ``id2label``/``label2id``."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """Non-composite configs must be constructible with no arguments."""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Build the config from ``config_common_kwargs`` and verify every
        keyword was applied, collecting mismatches into a single error.
        """
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # config_common_kwargs requests float16 here — the mangled
                    # `floataa` is ambiguous; NOTE(review): confirm against
                    # test_configuration_utils.config_common_kwargs.
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        """Run every common config check in sequence."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 624 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix with operator overloading and a Sherman-Morrison
    rank-1 inverse update.

    Fixes: the class is renamed to ``Matrix`` — the ``__main__`` tests below
    construct it by that name and ``__add__``/``__mul__`` type-check against
    it; ``__init__`` had duplicate parameter names (SyntaxError); and several
    assignments (``s`` in ``__str__``, element writes in ``__setitem__``,
    ``transpose``) went to a throwaway name.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier: pad every element to the widest one.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True if *loc* is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise.
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^(-1), assuming ``self`` is A^(-1), or None if
        the update is singular (Sherman-Morrison formula).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Exercise the Sherman-Morrison update on a small hand-built example."""
        # Fix: `ainv`, `u` and `v` were never bound (assignments and element
        # writes went to a throwaway name); both helpers were mangled to the
        # same name and the final call targeted the undefined `testa`.
        # a^(-1): start from the 3x3 identity.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    # NOTE(review): the mangled `testa()` could be either helper; running the
    # doctest entry point, matching upstream — confirm if test1 was intended.
    test2()
| 624 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the *topk* fillings of the single ``<mask>`` token in
    *masked_input* as ``(filled_sentence, probability, predicted_token)``
    tuples.

    Fix: every local (`input_ids`, `values`/`indices`, `masked_token`,
    `topk_filled_outputs`, …) was assigned to a throwaway name while later
    lines read the intended names; the function is renamed to what the script
    below calls, and `add_special_tokens`/`k` receive real values instead of
    the undefined `A__`.
    """
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece marks word starts with U+2581; turn it back into a space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
# Fix: tokenizer and model were both bound to the same throwaway constant
# while `model.eval()` and the `fill_mask` call below read the real names.
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 624 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count the ways a row of *length* units can be filled with red blocks of
    minimum length 3 separated by at least one empty unit (Project Euler 114).

    Fix: the table was bound to a throwaway name while the loops read
    ``ways_number``; renamed to ``solution``, which ``__main__`` calls.
    """
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # Place the block, leave one separating unit, and fill the rest.
                ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
    # Print the count for the default 50-unit row.
    print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of *arr* as tuples, using Heap's algorithm.

    The input list is permuted in place while generating. Fixes: the inner
    helper had two parameters both named ``A__`` (SyntaxError) and the swap
    results were discarded; renamed to ``heaps``, which ``__main__`` calls.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        # Emit all permutations of the first k elements of arr.
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    # Fix: the raw input was bound to a throwaway constant while the parse
    # below reads `user_input`; the parsed list must be named `arr`.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 624 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

# Fix: all three module constants were bound to the same throwaway name; the
# converter below constructs `BertAbsConfig(...)` by this name.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy the authors' pre-trained BertAbs weights into the HF-style
    architecture, check both stacks produce identical outputs, and save the
    state dict.

    Fix: nearly every local (`config`, `original`, `new_model`, the input-id
    tensors, the output tensors, …) was assigned to a throwaway name while
    later lines read the intended names; renamed to the function the
    ``__main__`` block calls.

    NOTE(review): `dump_path` is accepted but the save path below is
    hard-coded, mirroring the call signature — confirm whether it should be
    used as the output location.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs (pad to the 512-token max position)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    # Fix: parser and parsed args were bound to a throwaway constant while the
    # following lines read `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 624 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both constants were bound to the same throwaway name; the two config
# classes below call `logger.warning` / `logger.info`.
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision tower of the GIT model.

    Fixes: renamed to ``GitVisionConfig`` (the GIT config below instantiates
    it by that name); base restored to the imported ``PretrainedConfig``; the
    ``model_type`` class attribute was mangled although ``from_pretrained``
    reads ``cls.model_type``; and every ``self.`` attribute assignment had
    lost its target.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the vision sub-config, unwrapping it from a full GIT config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the GIT (GenerativeImage2Text) model, combining a
    vision sub-config with text-decoder hyperparameters.

    Fixes: renamed from the mangled placeholder (which collided with the
    vision config's class name); base restored to the imported
    ``PretrainedConfig``; ``model_type`` class attribute restored; every
    ``self.`` attribute assignment had lost its target.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Placeholder so references to ``Image`` resolve when PIL is absent.

        Fix: the stub class was mangled to an unrelated name, so it no longer
        stood in for ``Image``.
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    """Return the MD5 hex digest of the image's raw bytes — a stable
    fingerprint for comparing pipeline outputs in tests.

    Fix: `hashlib.mda` is the digit-mangled `hashlib.md5` (the commented-out
    expected digest below is 32 hex chars, i.e. MD5); renamed to `hashimage`,
    which the slow test calls.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    """Pipeline tests for depth estimation.

    Fixes: the class-level mapping attribute and all method names were
    mangled (the pipeline-test harness looks them up by name), and locals
    like ``outputs``/``dataset`` were assigned to a throwaway name while
    later lines read the intended names.
    """

    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 624 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
lowerCAmelCase__ = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _A ( ):
"""simple docstring"""
__lowercase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def _A ( A__ ):
"""simple docstring"""
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
class lowercase_ (lowerCamelCase__ ):
"""Byte-level BPE tokenizer (Longformer/RoBERTa style).

Encodes text to byte-mapped unicode characters, applies cached BPE merges,
and round-trips token <-> id via ``self.encoder`` / ``self.decoder``.

NOTE(review): throughout this class, results are bound to the throwaway
name ``__lowercase`` while later code reads attributes such as
``self.encoder``, ``self.byte_encoder``, ``self.bpe_ranks`` and locals such
as ``bpe_merges`` — presumably an automated rename broke the original
``self.x = ...`` assignments; confirm against the upstream
LongformerTokenizer before relying on this class. The helpers are also
called as ``bytes_to_unicode()`` / ``get_pairs()`` although they are defined
above as ``_A`` — verify the intended names.
"""
SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[str] = ['input_ids', 'attention_mask']
# Build special tokens, load vocab/merges files and the byte<->unicode tables.
# NOTE(review): the signature repeats the parameter name ``lowercase__``,
# which is a SyntaxError in Python — kept byte-identical here; fix upstream.
def __init__( self : Dict ,lowercase__ : Any ,lowercase__ : Dict ,lowercase__ : Optional[int]="replace" ,lowercase__ : List[Any]="<s>" ,lowercase__ : Any="</s>" ,lowercase__ : int="</s>" ,lowercase__ : Tuple="<s>" ,lowercase__ : Tuple="<unk>" ,lowercase__ : Tuple="<pad>" ,lowercase__ : List[str]="<mask>" ,lowercase__ : Dict=False ,**lowercase__ : int ,):
# Wrap plain-string special tokens in AddedToken so strip behaviour is explicit.
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else bos_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else eos_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else sep_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else cls_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else unk_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else mask_token
super().__init__(
errors=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,cls_token=lowercase__ ,pad_token=lowercase__ ,mask_token=lowercase__ ,add_prefix_space=lowercase__ ,**lowercase__ ,)
# Token -> id table from the JSON vocab file; inverse table for decoding.
with open(lowercase__ ,encoding='''utf-8''' ) as vocab_handle:
__lowercase = json.load(lowercase__ )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors  # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
# Merge ranks: earlier lines in merges.txt merge first (lower rank wins).
with open(lowercase__ ,encoding='''utf-8''' ) as merges_handle:
__lowercase = merges_handle.read().split('''\n''' )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowercase__ ,range(len(lowercase__ ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
# Size of the base vocabulary (excludes added tokens).
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return len(self.encoder )
# Full vocab including user-added tokens.
def SCREAMING_SNAKE_CASE ( self : Dict ):
return dict(self.encoder ,**self.added_tokens_encoder )
# Apply BPE merges to a single token, with memoisation in ``self.cache``.
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[Any] ):
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowercase__ )
__lowercase = get_pairs(lowercase__ )
if not pairs:
return token
while True:
# Greedily merge the lowest-ranked (most frequent) adjacent pair.
__lowercase = min(lowercase__ ,key=lambda lowercase__ : self.bpe_ranks.get(lowercase__ ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowercase__ ):
try:
__lowercase = word.index(lowercase__ ,lowercase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowercase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowercase__ )
__lowercase = new_word
if len(lowercase__ ) == 1:
break
else:
__lowercase = get_pairs(lowercase__ )
__lowercase = ''' '''.join(lowercase__ )
__lowercase = word
return word
# Tokenize a string: regex-split, byte-encode, then BPE each piece.
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Dict ):
__lowercase = []
for token in re.findall(self.pat ,lowercase__ ):
__lowercase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase__ ).split(''' ''' ) )
return bpe_tokens
# Token string -> id (falls back to the unk token's id).
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[Any] ):
return self.encoder.get(lowercase__ ,self.encoder.get(self.unk_token ) )
# Id -> token string.
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ):
return self.decoder.get(lowercase__ )
# Tokens -> text: undo the byte->unicode mapping, then UTF-8 decode.
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
__lowercase = ''''''.join(lowercase__ )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' ,errors=self.errors )
return text
# Write vocab.json and merges.txt to ``save_directory``.
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowercase__ ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowercase__ ,ensure_ascii=lowercase__ ) + '''\n''' )
__lowercase = 0
with open(lowercase__ ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
# Merges must be written in rank order; warn if ranks are not contiguous.
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowercase__ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
__lowercase = token_index
writer.write(''' '''.join(lowercase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# <s> A </s> for a single sequence, <s> A </s></s> B </s> for a pair.
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
# 1 where a special token sits, 0 elsewhere.
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase__ )) + [1]
return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) + [1]
# RoBERTa-style models do not use token type ids, so the mask is all zeros.
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
# Optionally prepend a space so the first word BPE-merges like mid-sentence words.
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple=False ,**lowercase__ : Optional[int] ):
__lowercase = kwargs.pop('''add_prefix_space''' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowercase__ ) > 0 and not text[0].isspace()):
__lowercase = ''' ''' + text
return (text, kwargs)
| 624 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(A__ ):
__lowercase = y[k] + step_size * ode_func(A__ , y[k] )
x += step_size
return y
# Run the module doctests when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 | 1 |
'''simple docstring'''
import re
def _A ( A__ ):
"""simple docstring"""
__lowercase = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A__ , A__ ) )
if __name__ == "__main__":
    lowerCAmelCase__ = '''0094702343221'''
    # Bug fix: the validator in this file is named ``_A`` and the sample
    # number is bound to ``lowerCAmelCase__``; the original called the
    # undefined names ``is_sri_lankan_phone_number``/``phone``.
    print(_A(lowerCAmelCase__))
| 624 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
__lowercase = sum(A__ ) / len(A__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(A__ )
# Run the module doctests when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 | 1 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCAmelCase__ = logging.get_logger(__name__)
@dataclass
class lowercase_ :
"""Command-line arguments controlling GLUE data loading and preprocessing.

Fields (in order): task name, data directory, max sequence length, and
whether to overwrite the feature cache; metadata ``help`` strings document
each one for the HfArgumentParser.
"""
SCREAMING_SNAKE_CASE : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
SCREAMING_SNAKE_CASE : int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
# NOTE(review): the lowercased task name is bound to a local and discarded —
# presumably this was ``__post_init__`` rebinding ``self.task_name``
# (as upstream GlueDataTrainingArguments does); confirm before relying on it.
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.task_name.lower()
class lowercase_ (lowerCamelCase__ ):
"""Dataset split identifiers (train / dev / test) used by the dataset below.

NOTE(review): later code refers to this enum as ``Split`` (e.g.
``Split.train``) — presumably this class was originally named ``Split``;
confirm the intended binding.
"""
SCREAMING_SNAKE_CASE : Any = 'train'
SCREAMING_SNAKE_CASE : Optional[Any] = 'dev'
SCREAMING_SNAKE_CASE : Optional[Any] = 'test'
class lowercase_ (lowerCamelCase__ ):
"""Deprecated torch ``Dataset`` of GLUE features with on-disk caching.

Converts raw GLUE examples to ``InputFeatures`` once, caches them next to
the data (guarded by a ``FileLock`` so only one distributed worker builds
the cache), and serves them by index.

NOTE(review): results in ``__init__`` are bound to the throwaway name
``__lowercase`` while later code reads ``self.processor``, ``self.features``,
``self.label_list``, ``args``, ``mode`` etc. — presumably an automated
rename broke the original assignments; confirm against the upstream
``GlueDataset`` before relying on this class.
"""
SCREAMING_SNAKE_CASE : GlueDataTrainingArguments
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : List[InputFeatures]
def __init__( self : List[Any] ,lowercase__ : GlueDataTrainingArguments ,lowercase__ : PreTrainedTokenizerBase ,lowercase__ : Optional[int] = None ,lowercase__ : Union[str, Split] = Split.train ,lowercase__ : Optional[str] = None ,):
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ,lowercase__ ,)
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
# Accept the split either as a string name or as a Split member.
if isinstance(lowercase__ ,lowercase__ ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir ,F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" ,)
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + '''.lock'''
with FileLock(lowercase__ ):
# Fast path: reuse the cached tensor file unless --overwrite_cache.
if os.path.exists(lowercase__ ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(lowercase__ )
logger.info(
F"Loading features from cached file {cached_features_file} [took %.3f s]" ,time.time() - start )
else:
logger.info(F"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
lowercase__ ,lowercase__ ,max_length=args.max_seq_length ,label_list=lowercase__ ,output_mode=self.output_mode ,)
__lowercase = time.time()
torch.save(self.features ,lowercase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : int ):
return len(self.features )
def __getitem__( self : Tuple ,lowercase__ : Optional[Any] ):
return self.features[i]
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return self.label_list
| 624 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCAmelCase__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCAmelCase__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCAmelCase__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
    """Spearman rank-order correlation metric (wraps ``scipy.stats.spearmanr``)."""

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Declare the metric's input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                } ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)

    def SCREAMING_SNAKE_CASE ( self : Tuple ,predictions : List[Any] ,references : List[Any] ,return_pvalue : Union[str, Any]=False ):
        """Compute the Spearman correlation (and optionally its p-value).

        Returns a dict with ``spearmanr`` and, when ``return_pvalue`` is
        truthy, ``spearmanr_pvalue``.
        """
        # Bug fix: the scipy result was bound to a throwaway name (so the
        # ``results[...]`` reads raised NameError) and all three parameters
        # shared one duplicated name, which is a SyntaxError.
        results = spearmanr(predictions ,references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 624 | 1 |
'''simple docstring'''
import cmath
import math
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = math.radians(A__ )
__lowercase = math.radians(A__ )
# Convert voltage and current to rectangular form
__lowercase = cmath.rect(A__ , A__ )
__lowercase = cmath.rect(A__ , A__ )
# Calculate apparent power
return voltage_rect * current_rect
# Run the module doctests when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
import random
from typing import Any
def _A ( A__ ):
"""simple docstring"""
for _ in range(len(A__ ) ):
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase , __lowercase = data[b], data[a]
return data
if __name__ == "__main__":
    # Bug fix: both demo lists were bound to the same module-level name (the
    # second clobbered the first) and the prints read the undefined names
    # ``integers``/``strings``/``fisher_yates_shuffle`` — bind distinct
    # names and call the shuffle helper ``_A`` defined above.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', _A(integers), _A(strings))
| 624 | 1 |
'''simple docstring'''
lowerCAmelCase__ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
lowerCAmelCase__ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
lowerCAmelCase__ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowerCAmelCase__ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
lowerCAmelCase__ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
lowerCAmelCase__ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 624 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Conversion script: update an old diffusers UNet repo in place (config key
# renames and/or state-dict key renames).
#
# NOTE(review): the three flags below were presumably ``do_only_config``,
# ``do_only_weights`` and ``do_only_renaming`` before an automated rename
# collapsed them (and most other bindings in this script) onto the single
# name ``lowerCAmelCase__`` — later code reads distinct names such as
# ``args``, ``config``, ``model``, ``state_dict`` and ``has_changed`` that
# are never bound here. Confirm against the upstream diffusers
# conversion script before running.
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = False
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase__ = parser.parse_args()
# Old config key -> new config key.
lowerCAmelCase__ = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
# Old state-dict key prefix -> new prefix.
lowerCAmelCase__ = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
# Repos either keep the UNet at the top level or under an ``unet`` subfolder.
lowerCAmelCase__ = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
lowerCAmelCase__ = reader.read()
lowerCAmelCase__ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
lowerCAmelCase__ = UNetaDModel(**config)
else:
lowerCAmelCase__ = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
lowerCAmelCase__ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase__ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase__ = config[key]
del config[key]
lowerCAmelCase__ = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
lowerCAmelCase__ = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
lowerCAmelCase__ = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
lowerCAmelCase__ = {}
for param_key, param_value in state_dict.items():
# Skip downsampler op weights that no longer exist in the new layout.
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
lowerCAmelCase__ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
lowerCAmelCase__ = param_value
lowerCAmelCase__ = True
if not has_changed:
lowerCAmelCase__ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 624 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowerCAmelCase__ = True
except ImportError:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _A ( A__ ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE ( lowercase__ : ArgumentParser ):
__lowercase = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' ,action='''store_true''' ,help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' ,type=lowercase__ ,help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' ,type=lowercase__ ,help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=lowercase__ )
def __init__( self : List[Any] ,lowercase__ : bool ,lowercase__ : str ,lowercase__ : Tuple=None ,*lowercase__ : int ):
__lowercase = testing
__lowercase = testing_file
__lowercase = path
def SCREAMING_SNAKE_CASE ( self : List[str] ):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__lowercase = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:2_2]]
if len(lowercase__ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
__lowercase = (
Path(lowercase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
__lowercase = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowercase__ ) )
else:
with open(self._testing_file ,'''r''' ) as configuration_file:
__lowercase = json.load(lowercase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=lowercase__ ,extra_context=lowercase__ ,)
__lowercase = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:2_2]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' ,'''r''' ) as configuration_file:
__lowercase = json.load(lowercase__ )
__lowercase = configuration['''lowercase_modelname''']
__lowercase = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F"{directory}/configuration.json" )
__lowercase = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
__lowercase = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
__lowercase = '''Flax''' in generate_tensorflow_pytorch_and_flax
__lowercase = F"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(lowercase__ ,exist_ok=lowercase__ )
os.makedirs(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}" ,exist_ok=lowercase__ )
# Tests require submodules as they have parent imports
with open(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" ,'''w''' ):
pass
shutil.move(
F"{directory}/__init__.py" ,F"{model_dir}/__init__.py" ,)
shutil.move(
F"{directory}/configuration_{lowercase_model_name}.py" ,F"{model_dir}/configuration_{lowercase_model_name}.py" ,)
def remove_copy_lines(lowercase__ : Optional[int] ):
with open(lowercase__ ,'''r''' ) as f:
__lowercase = f.readlines()
with open(lowercase__ ,'''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_{lowercase_model_name}.py" ,F"{model_dir}/modeling_{lowercase_model_name}.py" ,)
shutil.move(
F"{directory}/test_modeling_{lowercase_model_name}.py" ,F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" ,)
else:
os.remove(F"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_tf_{lowercase_model_name}.py" ,F"{model_dir}/modeling_tf_{lowercase_model_name}.py" ,)
shutil.move(
F"{directory}/test_modeling_tf_{lowercase_model_name}.py" ,F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" ,)
else:
os.remove(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_flax_{lowercase_model_name}.py" ,F"{model_dir}/modeling_flax_{lowercase_model_name}.py" ,)
shutil.move(
F"{directory}/test_modeling_flax_{lowercase_model_name}.py" ,F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" ,)
else:
os.remove(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/{lowercase_model_name}.md" ,F"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" ,)
shutil.move(
F"{directory}/tokenization_{lowercase_model_name}.py" ,F"{model_dir}/tokenization_{lowercase_model_name}.py" ,)
shutil.move(
F"{directory}/tokenization_fast_{lowercase_model_name}.py" ,F"{model_dir}/tokenization_{lowercase_model_name}_fast.py" ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase__ : str ,lowercase__ : str ,lowercase__ : List[str] ):
# Create temp file
__lowercase , __lowercase = mkstemp()
__lowercase = False
with fdopen(lowercase__ ,'''w''' ) as new_file:
with open(lowercase__ ) as old_file:
for line in old_file:
new_file.write(lowercase__ )
if line_to_copy_below in line:
__lowercase = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase__ )
if not line_found:
raise ValueError(F"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(lowercase__ ,lowercase__ )
# Remove original file
remove(lowercase__ )
# Move new file
move(lowercase__ ,lowercase__ )
def skip_units(lowercase__ : Union[str, Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase__ : Any ):
with open(lowercase__ ) as datafile:
__lowercase = []
__lowercase = False
__lowercase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__lowercase = line.split('''"''' )[1]
__lowercase = skip_units(lowercase__ )
elif "# Below: " in line and "##" not in line:
__lowercase = line.split('''"''' )[1]
__lowercase = skip_units(lowercase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = []
elif "# Replace with" in line and "##" not in line:
__lowercase = []
elif "##" not in line:
lines_to_copy.append(lowercase__ )
remove(lowercase__ )
replace_in_files(F"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(lowercase__ )
| 624 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ (lowerCamelCase__ ):
    """Config-tester mixin: builds a config from the tester's stored kwargs and
    checks that the CvT-specific attributes are present on it."""

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Fix: the constructed config was bound to a throwaway name but then
        # referenced via the undefined name `lowercase__` (NameError at runtime).
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,'''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config ,'''num_heads''' ) )
class lowercase_ :
    """Test helper that builds CvT configurations and synthetic image inputs and
    runs shape checks on CvtModel / CvtForImageClassification outputs.

    NOTE(review): throughout this file, results are bound to the throwaway name
    ``__lowercase`` while later statements read descriptive names (e.g.
    ``self.depth``, ``config``, ``pixel_values``); the ``__init__`` signature
    also repeats the parameter name ``lowercase__``. As written, the bindings do
    not line up with their uses — confirm against the upstream test file.
    """
    def __init__( self : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int]=1_3 ,lowercase__ : List[Any]=6_4 ,lowercase__ : Optional[int]=3 ,lowercase__ : Dict=[1_6, 4_8, 9_6] ,lowercase__ : Optional[Any]=[1, 3, 6] ,lowercase__ : Tuple=[1, 2, 1_0] ,lowercase__ : Optional[int]=[7, 3, 3] ,lowercase__ : str=[4, 2, 2] ,lowercase__ : Dict=[2, 1, 1] ,lowercase__ : Tuple=[2, 2, 2] ,lowercase__ : Tuple=[False, False, True] ,lowercase__ : int=[0.0, 0.0, 0.0] ,lowercase__ : str=0.0_2 ,lowercase__ : Union[str, Any]=1e-1_2 ,lowercase__ : Optional[int]=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[Any]=2 ,):
        # Store the parent test case and every hyper-parameter used later to
        # build CvtConfig instances and synthetic inputs.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = patch_sizes
        __lowercase = patch_stride
        __lowercase = patch_padding
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = num_labels
        __lowercase = num_channels
        __lowercase = embed_dim
        __lowercase = num_heads
        __lowercase = stride_kv
        __lowercase = depth
        __lowercase = cls_token
        __lowercase = attention_drop_rate
        __lowercase = initializer_range
        __lowercase = layer_norm_eps
    # Build random pixel values (and labels when use_labels is set) plus a config.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            # presumably image-classification labels in [0, num_labels) — TODO confirm
            __lowercase = ids_tensor([self.batch_size] ,self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels
    # Construct a CvtConfig from the stored hyper-parameters.
    def SCREAMING_SNAKE_CASE ( self : str ):
        return CvtConfig(
            image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)
    # Run the bare CvtModel and check the last_hidden_state shape; height/width
    # are down-sampled per stage by the patch conv formula below.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[str] ):
        __lowercase = CvtModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        __lowercase = (self.image_size, self.image_size)
        __lowercase , __lowercase = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            # Conv output size: floor((in + 2*pad - kernel) / stride) + 1
            __lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            __lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )
    # Run CvtForImageClassification with labels and check the logits shape.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Dict ):
        __lowercase = self.num_labels
        __lowercase = CvtForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    # Repackage prepare_config_and_inputs() output as (config, inputs_dict).
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Model test suite for CvT, driven by the tester helper above via the
    common ModelTesterMixin machinery.

    NOTE(review): every class attribute below is bound to the same name
    ``SCREAMING_SNAKE_CASE`` (each rebinding shadows the previous one), and
    method results are bound to the throwaway name ``__lowercase`` but read via
    descriptive names — confirm against the upstream test file.
    """
    SCREAMING_SNAKE_CASE : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE : Optional[int] = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    # presumably the usual ModelTesterMixin feature flags (pruning, resizing,
    # head masking, attentions, ...) — all disabled; verify which is which upstream
    SCREAMING_SNAKE_CASE : Optional[int] = False
    SCREAMING_SNAKE_CASE : Union[str, Any] = False
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : Optional[Any] = False
    SCREAMING_SNAKE_CASE : str = False
    # Set up the model tester and a ConfigTester for CvtConfig.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = CvtModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
    # Exercise the standard config round-trip checks.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # Intentionally a no-op (common-properties check handled elsewhere).
    def SCREAMING_SNAKE_CASE ( self : str ):
        return
    @unittest.skip(reason='''Cvt does not output attentions''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE ( self : str ):
        pass
    # The forward signature of every model class must start with `pixel_values`.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
    # Check number and shape of returned hidden states (one per stage).
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        def check_hidden_states_output(lowercase__ : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ):
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            with torch.no_grad():
                __lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
            __lowercase = outputs.hidden_states
            __lowercase = len(self.model_tester.depth )
            self.assertEqual(len(lowercase__ ) ,lowercase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,[
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] ,)
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase__ )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    # Smoke-test that the published checkpoint loads.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = CvtModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def _A ( ):
    """Load the COCO fixture image used by the integration tests below.

    Fix: the opened image was bound to a throwaway name while the return
    statement read the undefined name ``image`` (NameError at runtime).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
    """Integration test: run the published CvT checkpoint on a fixture image and
    compare the first logits against reference values.

    NOTE(review): as elsewhere in this file, results are bound to the throwaway
    name ``__lowercase`` but read via descriptive names (``model``, ``outputs``,
    ``expected_shape``, ...) — confirm against the upstream test file.
    """
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Image processor matching the first published CvT checkpoint.
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase__ )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        # verify the logits
        __lowercase = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape ,lowercase__ )
        # Reference slice of the first three ImageNet logits.
        __lowercase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(lowercase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
| 624 | 1 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# Lookup tables translating HF Diffusers UNet keys to original Stable Diffusion
# keys: direct key pairs, resnet-internal substring pairs, and per-layer
# prefixes generated by the loops below.
# NOTE(review): every assignment in this section is bound to the same throwaway
# name ``lowerCAmelCase__``, yet the append calls read descriptive names
# (``sd_down_res_prefix``, ``hf_down_res_prefix``, ...). As written those names
# are unresolved — confirm/restore the original bindings from upstream.
lowerCAmelCase__ = [
    # (stable-diffusion, HF Diffusers)
    ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
    ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
    ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
    ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
    ('''input_blocks.0.0.weight''', '''conv_in.weight'''),
    ('''input_blocks.0.0.bias''', '''conv_in.bias'''),
    ('''out.0.weight''', '''conv_norm_out.weight'''),
    ('''out.0.bias''', '''conv_norm_out.bias'''),
    ('''out.2.weight''', '''conv_out.weight'''),
    ('''out.2.bias''', '''conv_out.bias'''),
]
lowerCAmelCase__ = [
    # (stable-diffusion, HF Diffusers)
    ('''in_layers.0''', '''norm1'''),
    ('''in_layers.2''', '''conv1'''),
    ('''out_layers.0''', '''norm2'''),
    ('''out_layers.3''', '''conv2'''),
    ('''emb_layers.1''', '''time_emb_proj'''),
    ('''skip_connection''', '''conv_shortcut'''),
]
lowerCAmelCase__ = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        lowerCAmelCase__ = f'down_blocks.{i}.resnets.{j}.'
        lowerCAmelCase__ = f'input_blocks.{3*i + j + 1}.0.'
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            lowerCAmelCase__ = f'down_blocks.{i}.attentions.{j}.'
            lowerCAmelCase__ = f'input_blocks.{3*i + j + 1}.1.'
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        lowerCAmelCase__ = f'up_blocks.{i}.resnets.{j}.'
        lowerCAmelCase__ = f'output_blocks.{3*i + j}.0.'
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            lowerCAmelCase__ = f'up_blocks.{i}.attentions.{j}.'
            lowerCAmelCase__ = f'output_blocks.{3*i + j}.1.'
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        lowerCAmelCase__ = f'down_blocks.{i}.downsamplers.0.conv.'
        lowerCAmelCase__ = f'input_blocks.{3*(i+1)}.0.op.'
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        lowerCAmelCase__ = f'up_blocks.{i}.upsamplers.0.'
        lowerCAmelCase__ = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
# Mid-block attention and resnet prefixes.
lowerCAmelCase__ = '''mid_block.attentions.0.'''
lowerCAmelCase__ = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    lowerCAmelCase__ = f'mid_block.resnets.{j}.'
    lowerCAmelCase__ = f'middle_block.{2*j}.'
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _A ( A__ ):
    """Map a HF Diffusers UNet state dict to original Stable Diffusion key names.

    Builds an identity key mapping, applies the direct renames from
    ``unet_conversion_map``, then the resnet-internal and layer-prefix
    substring renames, and finally re-keys the tensors.

    Fixes vs. the previous revision: every ``replace`` result and mapping
    update was bound to a throwaway name and discarded, and the replace call
    passed the state dict itself (``A__``) as both arguments.

    NOTE(review): the conversion tables referenced here are assigned to the
    throwaway name ``lowerCAmelCase__`` above — restore their names there too.
    """
    # Start from the identity mapping: HF key -> SD key.
    mapping = {k: k for k in A__.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part ,sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part ,sd_part )
        mapping[k] = v
    # Re-key the tensors with the converted (SD) names.
    new_state_dict = {v: A__[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# Lookup tables translating HF Diffusers VAE keys to original SD keys, plus the
# attention-specific renames applied afterwards.
# NOTE(review): same systemic issue as above — assignments use the throwaway
# name ``lowerCAmelCase__`` while the append calls read descriptive names
# (``sd_down_prefix`` etc.); restore the original bindings from upstream.
lowerCAmelCase__ = [
    # (stable-diffusion, HF Diffusers)
    ('''nin_shortcut''', '''conv_shortcut'''),
    ('''norm_out''', '''conv_norm_out'''),
    ('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        lowerCAmelCase__ = f'encoder.down_blocks.{i}.resnets.{j}.'
        lowerCAmelCase__ = f'encoder.down.{i}.block.{j}.'
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        lowerCAmelCase__ = f'down_blocks.{i}.downsamplers.0.'
        lowerCAmelCase__ = f'down.{i}.downsample.'
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        lowerCAmelCase__ = f'up_blocks.{i}.upsamplers.0.'
        lowerCAmelCase__ = f'up.{3-i}.upsample.'
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        lowerCAmelCase__ = f'decoder.up_blocks.{i}.resnets.{j}.'
        lowerCAmelCase__ = f'decoder.up.{3-i}.block.{j}.'
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    lowerCAmelCase__ = f'mid_block.resnets.{i}.'
    lowerCAmelCase__ = f'mid.block_{i+1}.'
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
# Attention sub-module renames (applied only to keys containing "attentions").
lowerCAmelCase__ = [
    # (stable-diffusion, HF Diffusers)
    ('''norm.''', '''group_norm.'''),
    ('''q.''', '''query.'''),
    ('''k.''', '''key.'''),
    ('''v.''', '''value.'''),
    ('''proj_out.''', '''proj_attn.'''),
]
def _A ( A__ ):
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
def _A ( A__ ):
    """Map a HF Diffusers VAE state dict to original SD key names and reshape
    the mid-block attention projection weights into conv format.

    Fixes vs. the previous revision: the ``replace`` results, mapping updates
    and reshaped tensors were all bound to throwaway names and discarded.

    NOTE(review): ``vae_conversion_map`` / ``vae_conversion_map_attn`` are
    assigned to a throwaway name above, and ``reshape_weight_for_sd`` is the
    helper defined just above (renamed ``_A`` in this file) — restore those
    bindings for this to resolve.
    """
    mapping = {k: k for k in A__.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part ,sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part ,sd_part )
            mapping[k] = v
    new_state_dict = {v: A__[k] for k, v in mapping.items()}
    weights_to_convert = ['''q''', '''k''', '''v''', '''proj_out''']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F"mid.attn_1.{weight_name}.weight" in k:
                print(F"Reshaping {k} for SD format" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# Substring renames mapping HF CLIP text-encoder keys back to the original
# OpenCLIP naming, plus a combined regex (``textenc_pattern``) built from the
# escaped HF-side strings and a q/k/v -> index table.
# NOTE(review): the assignments below use the throwaway name
# ``lowerCAmelCase__`` while later code reads ``protected`` /
# ``textenc_pattern`` / ``code2idx`` — restore the original bindings.
lowerCAmelCase__ = [
    # (stable-diffusion, HF Diffusers)
    ('''resblocks.''', '''text_model.encoder.layers.'''),
    ('''ln_1''', '''layer_norm1'''),
    ('''ln_2''', '''layer_norm2'''),
    ('''.c_fc.''', '''.fc1.'''),
    ('''.c_proj.''', '''.fc2.'''),
    ('''.attn''', '''.self_attn'''),
    ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
    ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
    ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
lowerCAmelCase__ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ = {'''q''': 0, '''k''': 1, '''v''': 2}
def _A ( A__ ):
    """Convert a HF CLIP text-encoder state dict (SD v2 / OpenCLIP layout) to
    original SD format, fusing the separate q/k/v projections of each layer
    into single ``in_proj_weight`` / ``in_proj_bias`` tensors.

    Fixes vs. the previous revision:
    - the regex-substitution lambdas declared a parameter ``A__`` but read the
      undefined name ``m``;
    - captured q/k/v tensors, relabelled keys and concatenated projections
      were bound to throwaway names and never stored.

    NOTE(review): ``code2idx`` / ``textenc_pattern`` / ``protected`` are
    assigned to a throwaway name above — restore those bindings as well.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in A__.items():
        if (
            k.endswith('''.self_attn.q_proj.weight''' )
            or k.endswith('''.self_attn.k_proj.weight''' )
            or k.endswith('''.self_attn.v_proj.weight''' )
        ):
            k_pre = k[: -len('''.q_proj.weight''' )]
            # Single char 'q'/'k'/'v' identifying which projection this key is.
            k_code = k[-len('''q_proj.weight''' )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('''.self_attn.q_proj.bias''' )
            or k.endswith('''.self_attn.k_proj.bias''' )
            or k.endswith('''.self_attn.v_proj.bias''' )
        ):
            k_pre = k[: -len('''.q_proj.bias''' )]
            k_code = k[-len('''q_proj.bias''' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '''.in_proj_weight'''] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '''.in_proj_bias'''] = torch.cat(tensors )
    return new_state_dict
def _A ( A__ ):
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
    # Convert a HF Diffusers model directory into a single original-SD
    # checkpoint (ckpt or safetensors).
    # NOTE(review): assignments below are bound to the throwaway name
    # ``lowerCAmelCase__`` while subsequent statements read descriptive names
    # (``parser``, ``args``, ``unet_state_dict``, ...) — as written these are
    # unresolved; confirm/restore the original bindings from upstream.
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        lowerCAmelCase__ = load_file(unet_path, device='''cpu''')
    else:
        lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        lowerCAmelCase__ = torch.load(unet_path, map_location='''cpu''')
    if osp.exists(vae_path):
        lowerCAmelCase__ = load_file(vae_path, device='''cpu''')
    else:
        lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        lowerCAmelCase__ = torch.load(vae_path, map_location='''cpu''')
    if osp.exists(text_enc_path):
        lowerCAmelCase__ = load_file(text_enc_path, device='''cpu''')
    else:
        lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        lowerCAmelCase__ = torch.load(text_enc_path, map_location='''cpu''')
    # Convert the UNet model
    lowerCAmelCase__ = convert_unet_state_dict(unet_state_dict)
    lowerCAmelCase__ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    lowerCAmelCase__ = convert_vae_state_dict(vae_state_dict)
    lowerCAmelCase__ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    lowerCAmelCase__ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        lowerCAmelCase__ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        lowerCAmelCase__ = convert_text_enc_state_dict_vaa(text_enc_dict)
        lowerCAmelCase__ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        lowerCAmelCase__ = convert_text_enc_state_dict(text_enc_dict)
        lowerCAmelCase__ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    lowerCAmelCase__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        lowerCAmelCase__ = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        # NOTE(review): the '{"state_dict": ...}' wrapper is assigned to the
        # throwaway name and the *unwrapped* dict is saved below — upstream
        # saves the wrapper; confirm before shipping.
        lowerCAmelCase__ = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 624 |
'''simple docstring'''
def _A ( ):
"""simple docstring"""
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def _A ( A__ ):
"""simple docstring"""
__lowercase = 1
__lowercase = 2
while i * i <= n:
__lowercase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _A ( ):
    """Return the first triangle number with more than 500 divisors
    (Project Euler problem 12).

    Fix: the generator variable was passed as the undefined name ``A__``
    (this function takes no parameters) — it must be ``i``.

    NOTE(review): ``triangle_number_generator`` and ``count_divisors`` are the
    two helpers above, which this file has renamed to ``_A`` — restore their
    names for these references to resolve.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
    # NOTE(review): `solution` is expected to be the function defined above,
    # which this file names `_A` — confirm/restore the binding.
    print(solution())
| 624 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowercase_ (lowerCamelCase__ ):
    """Processor bundling the SpeechT5 feature extractor and tokenizer: routes
    audio inputs to the feature extractor, text inputs to the tokenizer, and
    attaches processed targets as ``labels`` (plus their attention mask as
    ``decoder_attention_mask``).

    NOTE(review): throughout this class, ``kwargs.pop`` / call results are
    bound to the throwaway name ``__lowercase`` while subsequent statements
    read descriptive names (``audio``, ``inputs``, ``targets``, ...); the
    ``__init__`` signature also repeats the parameter name ``lowercase__``.
    Confirm against the upstream processor before relying on any statement.
    """
    # Component class names used by ProcessorMixin to build the processor.
    SCREAMING_SNAKE_CASE : List[str] = 'SpeechT5FeatureExtractor'
    SCREAMING_SNAKE_CASE : Tuple = 'SpeechT5Tokenizer'
    def __init__( self : Any ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
        super().__init__(lowercase__ ,lowercase__ )
    # Process inputs and/or targets; exactly one of audio/text and one of
    # audio_target/text_target may be given.
    def __call__( self : List[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : Union[str, Any] ):
        __lowercase = kwargs.pop('''audio''' ,lowercase__ )
        __lowercase = kwargs.pop('''text''' ,lowercase__ )
        __lowercase = kwargs.pop('''text_target''' ,lowercase__ )
        __lowercase = kwargs.pop('''audio_target''' ,lowercase__ )
        __lowercase = kwargs.pop('''sampling_rate''' ,lowercase__ )
        # Reject ambiguous combinations up front.
        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
        # Route the main input to the matching component.
        if audio is not None:
            __lowercase = self.feature_extractor(lowercase__ ,*lowercase__ ,sampling_rate=lowercase__ ,**lowercase__ )
        elif text is not None:
            __lowercase = self.tokenizer(lowercase__ ,**lowercase__ )
        else:
            __lowercase = None
        # Route the target (label) input and extract the label tensor.
        if audio_target is not None:
            __lowercase = self.feature_extractor(audio_target=lowercase__ ,*lowercase__ ,sampling_rate=lowercase__ ,**lowercase__ )
            __lowercase = targets['''input_values''']
        elif text_target is not None:
            __lowercase = self.tokenizer(lowercase__ ,**lowercase__ )
            __lowercase = targets['''input_ids''']
        else:
            __lowercase = None
        if inputs is None:
            return targets
        # Attach labels (and their attention mask) to the processed inputs.
        if targets is not None:
            __lowercase = labels
            __lowercase = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                __lowercase = decoder_attention_mask
        return inputs
    # Pad already-processed inputs and/or labels to a common length.
    def SCREAMING_SNAKE_CASE ( self : str ,*lowercase__ : int ,**lowercase__ : Any ):
        __lowercase = kwargs.pop('''input_values''' ,lowercase__ )
        __lowercase = kwargs.pop('''input_ids''' ,lowercase__ )
        __lowercase = kwargs.pop('''labels''' ,lowercase__ )
        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
        if input_values is not None:
            __lowercase = self.feature_extractor.pad(lowercase__ ,*lowercase__ ,**lowercase__ )
        elif input_ids is not None:
            __lowercase = self.tokenizer.pad(lowercase__ ,**lowercase__ )
        else:
            __lowercase = None
        if labels is not None:
            # Token labels go through the tokenizer; spectrogram labels through
            # the feature extractor (temporarily padding num_mel_bins features).
            if "input_ids" in labels or (isinstance(lowercase__ ,lowercase__ ) and "input_ids" in labels[0]):
                __lowercase = self.tokenizer.pad(lowercase__ ,**lowercase__ )
                __lowercase = targets['''input_ids''']
            else:
                __lowercase = self.feature_extractor.feature_size
                __lowercase = self.feature_extractor.num_mel_bins
                __lowercase = self.feature_extractor.pad(lowercase__ ,*lowercase__ ,**lowercase__ )
                # Restore the feature extractor's original feature size.
                __lowercase = feature_size_hack
                __lowercase = targets['''input_values''']
        else:
            __lowercase = None
        if inputs is None:
            return targets
        if targets is not None:
            __lowercase = labels
            __lowercase = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                __lowercase = decoder_attention_mask
        return inputs
    # Delegate decoding helpers to the tokenizer.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,*lowercase__ : str ,**lowercase__ : str ):
        return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ,*lowercase__ : Tuple ,**lowercase__ : List[Any] ):
        return self.tokenizer.decode(*lowercase__ ,**lowercase__ )
| 624 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase_ :
"""simple docstring"""
    def __init__( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any]=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[Any]=True ,lowercase__ : str=True ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Tuple=9_9 ,lowercase__ : List[str]=[1, 1, 2] ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=3_2 ,lowercase__ : int=4 ,lowercase__ : Tuple=8 ,lowercase__ : Tuple=3_7 ,lowercase__ : str="gelu_new" ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[Any]=0.0 ,lowercase__ : Tuple=5_1_2 ,lowercase__ : Any=3 ,lowercase__ : List[str]=0.0_2 ,lowercase__ : List[Any]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : Tuple=False ,):
        # Store the parent test case and every hyper-parameter used to build
        # FunnelConfig instances and synthetic inputs.
        # NOTE(review): as elsewhere in this file, each result is bound to the
        # throwaway name `__lowercase` while reads use descriptive names
        # (`parent`, `batch_size`, `base`, ...), and the signature repeats the
        # parameter name `lowercase__` — confirm against the upstream test file.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_mask
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = block_sizes
        __lowercase = num_decoder_layers
        __lowercase = d_model
        __lowercase = n_head
        __lowercase = d_head
        __lowercase = d_inner
        __lowercase = hidden_act
        __lowercase = hidden_dropout
        __lowercase = attention_dropout
        __lowercase = activation_dropout
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = 2
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope
        __lowercase = initializer_std
        # Used in the tests to check the size of the first attention layer
        __lowercase = n_head
        # Used in the tests to check the size of the first hidden state
        __lowercase = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        __lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            __lowercase = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,):
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,):
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : List[str] ,):
__lowercase = TFFunnelForPreTraining(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,):
__lowercase = TFFunnelForMaskedLM(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForSequenceClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,):
__lowercase = self.num_choices
__lowercase = TFFunnelForMultipleChoice(config=lowercase__ )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForTokenClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Any ,):
__lowercase = TFFunnelForQuestionAnswering(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest(lowerCamelCase__, lowerCamelCase__, unittest.TestCase):
    """Tests for the full (encoder + decoder) TF Funnel model classes.

    NOTE(review): the two mixin base classes were mangled to one duplicated
    name; upstream uses the TF model-tester and pipeline-tester mixins —
    restore the real aliases from the file's imports.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(lowerCamelCase__, unittest.TestCase):
    """Tests for the base (encoder-only) TF Funnel model classes.

    Renamed from the mangled duplicate class name so it no longer clobbers the
    full-model test class defined above it.
    """

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # base=True: the tester builds inputs/shape expectations for the encoder-only model.
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 624 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Decoder keys that are legitimately absent from the fairseq checkpoint
# (checked against `missing_keys` after load_state_dict below).
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Translate one fairseq/audiocraft decoder weight name into the transformers scheme.

    Replacements are applied in sequence on the (possibly already rewritten) name,
    so e.g. ``transformer.layers.0.linear1`` becomes ``model.decoder.layers.0.fc1``.
    Returns the rewritten name (unchanged if no pattern matches).
    """
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size):
    """Rewrite all keys of a fairseq decoder state dict into the transformers layout.

    The fused ``in_proj_weight`` tensors are split into separate q/k/v
    projections of ``hidden_size`` rows each, and the encoder->decoder
    projection weights are pulled out into their own dict.

    Returns (state_dict, enc_dec_proj_state_dict).
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # strip the prefix; these are loaded into model.enc_to_dec_proj separately
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    """Return a MusicgenDecoderConfig for a named checkpoint size.

    Raises:
        ValueError: if ``checkpoint`` is not one of 'small', 'medium', 'large'.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,  # standard 4x FFN expansion
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert an audiocraft MusicGen checkpoint into the transformers format.

    Downloads the fairseq model, renames/splits its decoder weights, wraps it in
    a MusicgenForConditionalGeneration together with the T5 text encoder and the
    Encodec audio encoder, sanity-checks a forward pass, and optionally saves
    and/or pushes the result.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 624 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build a T5 model from ``config_file``, load the TF checkpoint weights into
    it, and save the result as a PyTorch model at ``pytorch_dump_path``."""
    # Initialise PyTorch model from the JSON config
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 624 | 1 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of two 2D points given as (x, y) pairs.

    Bug fix: the mangled version collapsed both parameters into one name, so it
    averaged a point with itself instead of computing the true midpoint.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa, vertexa_a, vertexa_b, depth):
    """Draw a Sierpinski triangle with the module-level turtle ``my_pen``.

    Draws the triangle defined by the three vertices, then recurses into the
    three corner sub-triangles until ``depth`` reaches 0.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexa_a[0], vertexa_a[1])
    my_pen.goto(vertexa_b[0], vertexa_b[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexa_a), get_mid(vertexa, vertexa_b), depth - 1)
    triangle(vertexa_a, get_mid(vertexa, vertexa_a), get_mid(vertexa_a, vertexa_b), depth - 1)
    triangle(vertexa_b, get_mid(vertexa_b, vertexa_a), get_mid(vertexa, vertexa_b), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
lowerCAmelCase__ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
lowerCAmelCase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 624 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Factory used by argparse's ``set_defaults(func=...)`` to build a
    DownloadCommand from the parsed CLI namespace."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    """CLI command that pre-downloads a model and its tokenizer into the cache."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the ``download`` sub-command and its flags to the CLI parser."""
        download_parser = parser.add_parser('''download''')
        download_parser.add_argument(
            '''--cache-dir''', type=str, default=None, help='''Path to location to store the models'''
        )
        download_parser.add_argument(
            '''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir'''
        )
        download_parser.add_argument(
            '''--trust-remote-code''',
            action='''store_true''',
            help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''',
        )
        download_parser.add_argument('''model''', type=str, help='''Name of the model to download''')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download model and tokenizer weights (imported lazily to keep CLI startup fast)."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 624 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments controlling what data is fed to the model for training and eval.

    Field names restored from the mangled source (they had all collapsed into a
    single duplicated identifier); defaults follow the help texts.
    """

    max_seq_length: Optional[int] = field(
        default=1_2_8,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        },
    )
@dataclass
class ModelArguments:
    """Arguments selecting which model/config/tokenizer to fine-tune.

    Field names restored from the mangled source; defaults follow the help texts
    (`model_revision` keeps its visible default of 'main').
    """

    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    language: str = field(
        default=None, metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'}
    )
    train_language: Optional[str] = field(
        default=None, metadata={'help': 'Train language if it is different from the evaluation language.'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )
def _A ( ):
"""simple docstring"""
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , A__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(A__ )
datasets.utils.logging.set_verbosity(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
__lowercase = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowercase = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = train_dataset.features['''label'''].names
if training_args.do_eval:
__lowercase = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = eval_dataset.features['''label'''].names
if training_args.do_predict:
__lowercase = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = predict_dataset.features['''label'''].names
# Labels
__lowercase = len(A__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , idalabel={str(A__ ): label for i, label in enumerate(A__ )} , labelaid={label: i for i, label in enumerate(A__ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
__lowercase = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowercase = False
def preprocess_function(A__ ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=A__ , max_length=data_args.max_seq_length , truncation=A__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
__lowercase = min(len(A__ ) , data_args.max_train_samples )
__lowercase = train_dataset.select(range(A__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
__lowercase = train_dataset.map(
A__ , batched=A__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(A__ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__lowercase = min(len(A__ ) , data_args.max_eval_samples )
__lowercase = eval_dataset.select(range(A__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
__lowercase = eval_dataset.map(
A__ , batched=A__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
__lowercase = min(len(A__ ) , data_args.max_predict_samples )
__lowercase = predict_dataset.select(range(A__ ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
__lowercase = predict_dataset.map(
A__ , batched=A__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
__lowercase = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(A__ ):
    # Takes an EvalPrediction: unwrap the logits (tuple or array), argmax over
    # the class axis, then score with the XNLI accuracy metric loaded above.
    # NOTE(review): the body references `p` while the parameter is `A__`, and
    # `isinstance(p.predictions , A__ )` is not a valid type check — this is
    # automated-transform damage; compare with the upstream run_xnli example.
    __lowercase = p.predictions[0] if isinstance(p.predictions , A__ ) else p.predictions
    __lowercase = np.argmax(A__ , axis=1 )
    return metric.compute(predictions=A__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowercase = default_data_collator
elif training_args.fpaa:
__lowercase = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 )
else:
__lowercase = None
# Initialize our Trainer
__lowercase = Trainer(
model=A__ , args=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=A__ , tokenizer=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=A__ )
__lowercase = train_result.metrics
__lowercase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ )
)
__lowercase = min(A__ , len(A__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , A__ )
trainer.save_metrics('''train''' , A__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate(eval_dataset=A__ )
__lowercase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A__ )
__lowercase = min(A__ , len(A__ ) )
trainer.log_metrics('''eval''' , A__ )
trainer.save_metrics('''eval''' , A__ )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__lowercase , __lowercase , __lowercase = trainer.predict(A__ , metric_key_prefix='''predict''' )
__lowercase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(A__ )
)
__lowercase = min(A__ , len(A__ ) )
trainer.log_metrics('''predict''' , A__ )
trainer.save_metrics('''predict''' , A__ )
__lowercase = np.argmax(A__ , axis=1 )
__lowercase = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(A__ , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(A__ ):
__lowercase = label_list[item]
writer.write(F"{index}\t{item}\n" )
if __name__ == "__main__":
    # Standard HF-examples entry point: run the XNLI fine-tuning driver.
    main()
| 624 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# NOTE(review): these two module constants were both renamed to the same
# identifier by an automated transform (upstream: TOKENIZER_CHECKPOINTS, a
# list, and TINY_MODEL_CHECKPOINT); only the second binding survives here.
lowerCAmelCase__ = ['''gpt2''']
lowerCAmelCase__ = '''gpt2'''

if is_tf_available():

    class lowercase_ (tf.Module ):
        """tf.Module bundling an in-graph GPT-2 tokenizer with a freshly
        initialised LM-head model, so both can be exported together through
        tf.saved_model (exercised by the SavedModel test below)."""

        def __init__( self : List[str] ,lowercase__ : Tuple ):
            super().__init__()
            # NOTE(review): `tokenizer` is unbound here — the parameter is
            # `lowercase__` and the `self.*` targets were collapsed to
            # `__lowercase` by the transform; TODO confirm against upstream.
            __lowercase = tokenizer
            __lowercase = AutoConfig.from_pretrained(lowercase__ )
            __lowercase = TFGPTaLMHeadModel.from_config(lowercase__ )

        @tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='''text''' ),) )
        def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ):
            # Tokenize inside the graph, densify the ragged ids, and derive an
            # attention mask from the non-zero positions.
            __lowercase = self.tokenizer(lowercase__ )
            __lowercase = tokenized['''input_ids'''].to_tensor()
            __lowercase = tf.cast(input_ids_dense > 0 ,tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            __lowercase = self.model(input_ids=lowercase__ ,attention_mask=lowercase__ )['''logits''']
            return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase ):
    """Checks that the in-graph TFGPT2Tokenizer matches the reference Python
    GPT2Tokenizer and survives tf.function compilation, SavedModel export,
    get_config/from_config round-trips and max_length truncation.

    NOTE(review): an automated transform renamed every method to
    SCREAMING_SNAKE_CASE (later defs shadow earlier ones in the class body)
    and collapsed locals to `__lowercase`, leaving several bodies referencing
    names that are never bound (`tokenized`, `python_outputs`, ...).
    Compare with the upstream test file before trusting runtime behaviour.
    """

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # setUp: build paired (reference, in-graph) tokenizers and sample texts.
        super().setUp()
        __lowercase = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __lowercase = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        # Deliberately tricky inputs: control chars, CJK, accents, rare glyphs.
        __lowercase = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        __lowercase = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Parity: Python tokenizer output must equal in-graph tokenizer output.
        for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                __lowercase = tokenizer([test_inputs] ,return_tensors='''tf''' )
                __lowercase = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    __lowercase = python_outputs[key].numpy()
                    __lowercase = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(lowercase__ ,tf.intaa ) == tf_outputs_values ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # tf.function compilation must not change tokenizer outputs.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = tf.function(lowercase__ )
            for test_inputs in self.test_sentences:
                __lowercase = tf.constant(lowercase__ )
                __lowercase = compiled_tokenizer(lowercase__ )
                __lowercase = tf_tokenizer(lowercase__ )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Tokenizer + model must survive a tf.saved_model save/load round-trip.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = ModelToSave(tokenizer=lowercase__ )
            __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
            __lowercase = model.serving(lowercase__ )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                __lowercase = Path(lowercase__ ) / '''saved.model'''
                tf.saved_model.save(lowercase__ ,lowercase__ ,signatures={'''serving_default''': model.serving} )
                __lowercase = tf.saved_model.load(lowercase__ )
                __lowercase = loaded_model.signatures['''serving_default'''](lowercase__ )['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # get_config / from_config round-trip must preserve outputs.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
            __lowercase = tf_tokenizer(lowercase__ )  # Build model with some sample inputs
            __lowercase = tf_tokenizer.get_config()
            __lowercase = TFGPTaTokenizer.from_config(lowercase__ )
            __lowercase = model_from_config(lowercase__ )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # max_length must cap the width of the produced input_ids.
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            __lowercase = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
                __lowercase = tf_tokenizer(lowercase__ ,max_length=lowercase__ )
                __lowercase = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 624 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowercase_ (lowerCamelCase__ ):
    """Agent tool that summarizes English text (upstream: TextSummarizationTool).

    Uses the BART checkpoint ``philschmid/bart-large-cnn-samsum`` and wraps the
    standard PipelineTool pipeline: encode -> generate -> decode.

    NOTE(review): all class attributes were renamed to one identifier by an
    automated transform (upstream: default_checkpoint, description, name,
    pre_processor_class, model_class, inputs, outputs), so only the last
    binding survives in this file.
    """

    SCREAMING_SNAKE_CASE : Union[str, Any] = 'philschmid/bart-large-cnn-samsum'
    SCREAMING_SNAKE_CASE : str = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    SCREAMING_SNAKE_CASE : List[str] = 'summarizer'
    SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer
    SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM
    SCREAMING_SNAKE_CASE : List[str] = ['text']
    SCREAMING_SNAKE_CASE : List[str] = ['text']

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ):
        # encode: tokenize the raw text into pt tensors.
        # NOTE(review): `truncation=lowercase__` passes the text itself as the
        # truncation flag; upstream this was `truncation=True` — TODO confirm.
        return self.pre_processor(lowercase__ ,return_tensors='''pt''' ,truncation=lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ):
        # forward: greedy/beam generation; keep only the first sequence.
        return self.model.generate(**lowercase__ )[0]

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Union[str, Any] ):
        # decode: ids -> text.
        # NOTE(review): `skip_special_tokens`/`clean_up_tokenization_spaces`
        # receive the ids argument instead of booleans — likely transform
        # damage (upstream passes True for both).
        return self.pre_processor.decode(lowercase__ ,skip_special_tokens=lowercase__ ,clean_up_tokenization_spaces=lowercase__ )
| 624 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# NOTE(review): the four constants below were all renamed to one identifier
# (upstream: VECTOR_1, VECTOR_2, VECTOR_3, INITIAL_VECTORS); only the last
# binding survives, and the VECTOR_* names referenced in the list are
# undefined in this file — this line would raise NameError at import time.
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_660_254])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _A(initial_vectors, steps):
    """Run `steps` rounds of the Koch-snowflake subdivision.

    :param initial_vectors: list of 2-element numpy arrays describing the
        starting polyline (e.g. the initial triangle).
    :param steps: number of subdivision iterations to apply.
    :return: the subdivided list of vectors.

    Fix: the original signature declared the same parameter twice
    (``A__ , A__``), which is a SyntaxError; the intended names are
    recovered from the body's own references (``initial_vectors`` and the
    loop bound).
    """
    vectors = initial_vectors
    for _ in range(steps):
        # Each step replaces every segment with the 4-segment Koch motif.
        vectors = iteration_step(vectors)
    return vectors
def _A(vectors):
    """One Koch subdivision step: replace every segment by four segments
    (start, 1/3 point, the 1/3 vector rotated +60 degrees, 2/3 point),
    keeping the final endpoint.

    :param vectors: list of 2-element numpy arrays (polyline vertices).
    :return: new list of vertices, roughly 4x as many segments.

    Fix: the body referenced ``vectors`` while the parameter was named
    ``A__`` — a NameError at call time; the parameter is renamed to match.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        # Apex of the outward-pointing bump: rotate the 1/3 vector by 60 deg.
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = numpy.radians(A__ )
__lowercase , __lowercase = numpy.cos(A__ ), numpy.sin(A__ )
__lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(A__ , A__ )
def _A(vectors):
    """Plot the polyline described by `vectors` with matplotlib.

    :param vectors: list of 2-element arrays; consumed as (x, y) pairs.

    Fixes: ``axes`` was referenced but never bound (the ``plt.gca()`` result
    was discarded), and the unpacked coordinate tuples were discarded while
    the raw input was passed to ``plt.plot`` twice.
    """
    axes = plt.gca()
    axes.set_aspect('''equal''')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of
    # all y-coordinates as inputs, which are constructed from the vector-list
    # using zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `iterate`, `INITIAL_VECTORS` and `plot` are the upstream
    # names; in this file the functions are defined as `_A` and the constants
    # are rebound to `lowerCAmelCase__`, so these calls would raise NameError.
    lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 624 | 1 |
'''simple docstring'''
def _A ( A__ = 10 ):
"""simple docstring"""
if not isinstance(A__ , A__ ) or n < 0:
raise ValueError('''Invalid input''' )
__lowercase = 10**n
__lowercase = 28433 * (pow(2 , 7830457 , A__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): `solution` is the upstream name of `_A` above; as written
    # this f-string would raise NameError when evaluated.
    print(f'{solution(10) = }')
| 624 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding for the ViT-MAE model family: nothing heavy is
# imported until an attribute is actually accessed (standard HF pattern).
# NOTE(review): an automated transform collapsed `_import_structure` and its
# keyed assignments into rebindings of a single name (`lowerCAmelCase__`), so
# the `_import_structure` passed to _LazyModule at the bottom is undefined in
# this file; compare with the upstream __init__.py.
lowerCAmelCase__ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}

# Register the PyTorch symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMAEForPreTraining''',
        '''ViTMAELayer''',
        '''ViTMAEModel''',
        '''ViTMAEPreTrainedModel''',
    ]

# Register the TensorFlow symbols only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        '''TFViTMAEForPreTraining''',
        '''TFViTMAEModel''',
        '''TFViTMAEPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys

    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 | 1 |
'''simple docstring'''
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
def update_area_of_max_square(A__ , A__ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowercase = update_area_of_max_square(A__ , col + 1 )
__lowercase = update_area_of_max_square(row + 1 , col + 1 )
__lowercase = update_area_of_max_square(row + 1 , A__ )
if mat[row][col]:
__lowercase = 1 + min([right, diagonal, down] )
__lowercase = max(largest_square_area[0] , A__ )
return sub_problem_sol
else:
return 0
__lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
A__ , A__ , A__ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowercase = update_area_of_max_square_using_dp_array(A__ , col + 1 , A__ )
__lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , A__ )
__lowercase = update_area_of_max_square_using_dp_array(row + 1 , A__ , A__ )
if mat[row][col]:
__lowercase = 1 + min([right, diagonal, down] )
__lowercase = max(largest_square_area[0] , A__ )
__lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowercase = [0]
__lowercase = [[-1] * cols for _ in range(A__ )]
update_area_of_max_square_using_dp_array(0 , 0 , A__ )
return largest_square_area[0]
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowercase = dp_array[row][col + 1]
__lowercase = dp_array[row + 1][col + 1]
__lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowercase = 1 + min(A__ , A__ , A__ )
__lowercase = max(dp_array[row][col] , A__ )
else:
__lowercase = 0
return largest_square_area
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [0] * (cols + 1)
__lowercase = [0] * (cols + 1)
__lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowercase = current_row[col + 1]
__lowercase = next_row[col + 1]
__lowercase = next_row[col]
if mat[row][col] == 1:
__lowercase = 1 + min(A__ , A__ , A__ )
__lowercase = max(current_row[col] , A__ )
else:
__lowercase = 0
__lowercase = current_row
return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `largest_square_area_in_matrix_bottom_up` is the upstream
    # name; every function in this file is defined as `_A`, so this call would
    # raise NameError. The expected output for the 2x2 all-ones sample is 2.
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 624 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# NOTE(review): the seven constants below were all renamed to one identifier
# (upstream: OUTPUT_SIZE, SCALE_RANGE, FILTER_TINY_SCALE, LABEL_DIR, IMG_DIR,
# OUTPUT_DIR, NUMBER_IMAGES); only the last binding survives, so the names
# referenced in the functions below are undefined in this file.
lowerCAmelCase__ = (720, 1280)  # Height, Width of the output mosaic canvas
lowerCAmelCase__ = (0.4, 0.6)  # if height or width lower than this scale, drop it.
lowerCAmelCase__ = 1 / 100  # minimum relative box size kept after cropping
lowerCAmelCase__ = ''''''  # label directory (one YOLO .txt per image)
lowerCAmelCase__ = ''''''  # image directory
lowerCAmelCase__ = ''''''  # output directory
lowerCAmelCase__ = 250  # number of mosaic images to generate
def _A( ):
    """Generate mosaic-augmented images: repeatedly pick 4 random samples,
    stitch them onto one canvas, write the JPEG plus a YOLO-format .txt
    label file (class x_center y_center width height).

    NOTE(review): an automated transform broke this body — the helpers exist
    in this file only as `_A`, the module constants are all bound to
    `lowerCAmelCase__`, and `A__`, `path`, `new_annos`, `file_name`,
    `file_root`, `annos_list` are referenced without being bound here.
    Compare with the upstream mosaic_augmentation.py before running.
    """
    __lowercase , __lowercase = get_dataset(A__ , A__ )
    for index in range(A__ ):
        __lowercase = random.sample(range(len(A__ ) ) , 4 )
        __lowercase , __lowercase , __lowercase = update_image_and_anno(
            A__ , A__ , A__ , A__ , A__ , filter_scale=A__ , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __lowercase = random_chars(32 )
        __lowercase = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __lowercase = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(F"{file_root}.jpg" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        __lowercase = []
        for anno in new_annos:
            # Convert corner boxes back to YOLO centre/size format.
            __lowercase = anno[3] - anno[1]
            __lowercase = anno[4] - anno[2]
            __lowercase = anno[1] + width / 2
            __lowercase = anno[2] + height / 2
            __lowercase = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(A__ )
        with open(F"{file_root}.txt" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = []
for label_file in glob.glob(os.path.join(A__ , '''*.txt''' ) ):
__lowercase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(A__ ) as in_file:
__lowercase = in_file.readlines()
__lowercase = os.path.join(A__ , F"{label_name}.jpg" )
__lowercase = []
for obj_list in obj_lists:
__lowercase = obj_list.rstrip('''\n''' ).split(''' ''' )
__lowercase = float(obj[1] ) - float(obj[3] ) / 2
__lowercase = float(obj[2] ) - float(obj[4] ) / 2
__lowercase = float(obj[1] ) + float(obj[3] ) / 2
__lowercase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def _A(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Compose four images (and their boxes) into one mosaic canvas.

    :param all_img_list: list of image file paths.
    :param all_annos: per-image lists of [class, xmin, ymin, xmax, ymax]
        boxes in relative coordinates.
    :param idxs: four indices selecting the images to stitch
        (top-left, top-right, bottom-left, bottom-right).
    :param output_size: (height, width) of the mosaic canvas.
    :param scale_range: (lo, hi) range for the random split point.
    :param filter_scale: drop boxes whose width or height falls below this.
    :return: (mosaic image, remapped boxes, path of the first image).

    Fixes (restored from the upstream mosaic_augmentation.py): the
    signature declared six parameters all named ``A__`` (a SyntaxError);
    the four ``output_img[...] = img`` slice assignments had been collapsed
    into no-op local assignments, leaving the canvas black; and
    ``np.uinta`` is not a numpy dtype (``np.uint8`` intended).
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    # Random split point dividing the canvas into the four quadrants.
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def _A ( A__ ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__lowercase = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
    # NOTE(review): `main` is the upstream name of the parameterless `_A`
    # defined above; as written this call would raise NameError.
    main()
    print('''DONE ✅''')
| 624 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def _A ( A__ , A__ = False ):
"""simple docstring"""
if not arr:
return 0
__lowercase = 0 if allow_empty_subarrays else float('''-inf''' )
__lowercase = 0.0
for num in arr:
__lowercase = max(0 if allow_empty_subarrays else num , curr_sum + num )
__lowercase = max(A__ , A__ )
return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    lowerCAmelCase__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    # NOTE(review): `max_subarray_sum` / `nums` are the upstream names; here
    # the function is `_A` and the list is bound to `lowerCAmelCase__`, so
    # this line would raise NameError. Expected output for the sample is 6.
    print(f'{max_subarray_sum(nums) = }')
| 624 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the module constants below (upstream: logger,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) were all renamed to the same
# identifier by an automated transform; only the last binding survives, so
# the names referenced inside the tokenizer class are undefined here.
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.model'''}
lowerCAmelCase__ = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}
lowerCAmelCase__ = {
    '''google/rembert''': 256,
}
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-based tokenizer for RemBERT (upstream: RemBertTokenizer).

    Wraps a ``sentencepiece`` model and implements the standard
    PreTrainedTokenizer hooks: piece/id conversion, special-token assembly
    ([CLS] A [SEP] or [CLS] A [SEP] B [SEP]), special-token masks,
    token-type ids and vocabulary saving.

    Fixes applied: several method signatures declared multiple parameters
    under the single name ``lowercase__`` (a SyntaxError in ``__init__``,
    ``_tokenize`` and the sequence-pair helpers), and the transform had
    collapsed the ``self.*``/local assignment targets that later lines
    read; both are restored from the bodies' own references and the
    upstream signature. Logic is unchanged.

    NOTE(review): the class attributes keep their transform-mangled names
    (all three are ``SCREAMING_SNAKE_CASE``, so only the last binding
    survives) and reference module constants that this file no longer
    defines — confirm against the upstream module.
    """

    SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def SCREAMING_SNAKE_CASE(self):
        # vocab_size: number of pieces in the SentencePiece model.
        return len(self.sp_model)

    def SCREAMING_SNAKE_CASE(self):
        # get_vocab: piece -> id mapping, including user-added tokens.
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it here and
        # reload it from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def SCREAMING_SNAKE_CASE(self, text, sample=False):
        # _tokenize: split raw text into SentencePiece pieces.
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def SCREAMING_SNAKE_CASE(self, token):
        # _convert_token_to_id
        return self.sp_model.PieceToId(token)

    def SCREAMING_SNAKE_CASE(self, index):
        # _convert_id_to_token
        return self.sp_model.IdToPiece(index)

    def SCREAMING_SNAKE_CASE(self, tokens):
        # convert_tokens_to_string: detokenize pieces back to plain text.
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def SCREAMING_SNAKE_CASE(self, token_ids_0, token_ids_1=None):
        # build_inputs_with_special_tokens:
        #   single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def SCREAMING_SNAKE_CASE(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        # get_special_tokens_mask: 1 marks special tokens, 0 sequence tokens.
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def SCREAMING_SNAKE_CASE(self, token_ids_0, token_ids_1=None):
        # create_token_type_ids_from_sequences: 0s for segment A, 1s for B.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def SCREAMING_SNAKE_CASE(self, save_directory, filename_prefix=None):
        # save_vocabulary: copy the .model file into `save_directory`.
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 624 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowercase_ (lowerCamelCase__ ):
    """Fixture builder for the DistilBERT tests (upstream:
    DistilBertModelTester): fabricates a tiny random config plus input
    tensors and runs output-shape checks for each task head.

    NOTE(review): an automated transform renamed every method parameter to
    ``lowercase__`` — signatures with more than one such parameter (e.g.
    ``__init__`` and each ``create_and_check_*`` method) declare duplicate
    argument names, which is a SyntaxError, and the bodies reference the
    original (now unbound) names such as ``parent`` and ``config``.
    Restore from the upstream test file before executing.
    """

    def __init__( self : str ,lowercase__ : int ,lowercase__ : Union[str, Any]=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : Any=True ,lowercase__ : Tuple=True ,lowercase__ : Tuple=False ,lowercase__ : Any=True ,lowercase__ : List[Any]=9_9 ,lowercase__ : int=3_2 ,lowercase__ : Dict=5 ,lowercase__ : Optional[int]=4 ,lowercase__ : List[str]=3_7 ,lowercase__ : Optional[int]="gelu" ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : List[str]=0.1 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : str=1_6 ,lowercase__ : Dict=2 ,lowercase__ : Union[str, Any]=0.0_2 ,lowercase__ : int=3 ,lowercase__ : Tuple=4 ,lowercase__ : Optional[Any]=None ,):
        # Store every hyper-parameter of the tiny test model.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_mask
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = type_sequence_label_size
        __lowercase = initializer_range
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # prepare_config_and_inputs: random ids, masks and labels + config.
        __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        __lowercase = None
        if self.use_input_mask:
            __lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            __lowercase = ids_tensor([self.batch_size] ,self.num_choices )
        __lowercase = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # get_config: tiny DistilBertConfig built from the stored parameters.
        return DistilBertConfig(
            vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : List[str] ):
        # create_and_check_distilbert_model: last_hidden_state shape check.
        __lowercase = DistilBertModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ):
        # masked-LM head: logits over the vocabulary.
        __lowercase = DistilBertForMaskedLM(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : int ):
        # QA head: per-token start/end logits.
        __lowercase = DistilBertForQuestionAnswering(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(
            lowercase__ ,attention_mask=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ):
        # sequence-classification head: one logit vector per example.
        __lowercase = self.num_labels
        __lowercase = DistilBertForSequenceClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : Tuple ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ):
        # token-classification head: one logit vector per token.
        __lowercase = self.num_labels
        __lowercase = DistilBertForTokenClassification(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : str ):
        # multiple-choice head: inputs expanded to (batch, num_choices, seq).
        __lowercase = self.num_choices
        __lowercase = DistilBertForMultipleChoice(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        __lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        __lowercase = model(
            lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # prepare_config_and_inputs_for_common: config + kwargs dict for
        # the shared ModelTesterMixin machinery.
        __lowercase = self.prepare_config_and_inputs()
        ((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Model-level test suite for DistilBert, wiring a model tester into the
    shared ModelTesterMixin / PipelineTesterMixin machinery.

    NOTE(review): all class attributes were renamed to one shared name
    (`SCREAMING_SNAKE_CASE`) and all methods likewise, so later definitions
    clobber earlier ones; the original distinct names (all_model_classes,
    pipeline_model_mapping, test_config, test_model, ...) must be restored
    for this suite to be collectable.
    """

    # Originally `all_model_classes`: every DistilBert head run through the
    # common model tests.
    SCREAMING_SNAKE_CASE : str = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # Originally `pipeline_model_mapping`: pipeline task -> model class.
    SCREAMING_SNAKE_CASE : Optional[Any] = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Originally the boolean feature flags (fx_compatible, test_pruning, ...).
    SCREAMING_SNAKE_CASE : int = True
    SCREAMING_SNAKE_CASE : Union[str, Any] = True
    SCREAMING_SNAKE_CASE : Union[str, Any] = True
    SCREAMING_SNAKE_CASE : Any = True

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # setUp: build the model tester and a ConfigTester (dim=37 keeps the
        # test model tiny).
        __lowercase = DistilBertModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,dim=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # test_config
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # test_distilbert_model
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : str ):
        # test_for_masked_lm
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # test_for_question_answering
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # test_for_sequence_classification
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # test_for_token_classification
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # test_for_multiple_choice
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase__ )

    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        # test_model_from_pretrained: smoke-load the first hub checkpoint.
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = DistilBertModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )

    @slow
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # torchscript round-trip: trace on CPU, save, reload on GPU, run.
        __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            __lowercase = True
            __lowercase = model_class(config=lowercase__ )
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
            __lowercase = torch.jit.trace(
                lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''traced_model.pt''' ) )
                __lowercase = torch.jit.load(os.path.join(lowercase__ ,'''traced_model.pt''' ) ,map_location=lowercase__ )
                loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
    """Integration test: run the real distilbert-base-uncased checkpoint on a
    fixed input and compare a 3x3 slice of the hidden states against
    reference values (atol=1e-4)."""

    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        # NOTE(review): assignment targets were renamed to `__lowercase`, so
        # the `model` / `input_ids` / `attention_mask` / `output` names read
        # below are undefined in this obfuscated form.
        __lowercase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        __lowercase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
        # Hidden states: (batch=1, seq=11, hidden=768).
        __lowercase = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape ,lowercase__ )
        __lowercase = torch.tensor(
            [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
| 624 |
'''simple docstring'''
def _A ( A__ = 1000000 ):
"""simple docstring"""
__lowercase = set(range(3 , A__ , 2 ) )
primes.add(2 )
for p in range(3 , A__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , A__ , A__ ) ) )
__lowercase = [float(A__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A__ , limit + 1 , A__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
def _A(height, from_pole, to_pole, with_pole):
    """Recursively print the moves solving Towers of Hanoi for `height` disks.

    Args:
        height: number of disks to move.
        from_pole: pole the stack starts on.
        to_pole: destination pole.
        with_pole: spare pole used for intermediate moves.
    """
    # Bug fix: the original declared four parameters all named `A__`
    # (a SyntaxError) and recursed via the renamed-away name `move_tower`;
    # the recursion now calls this function directly.
    if height >= 1:
        _A(height - 1, from_pole, with_pole, to_pole)
        # NOTE(review): relies on the sibling `move_disk`; in this dump every
        # function was renamed `_A`, so that sibling name must be restored too.
        move_disk(from_pole, to_pole)
        _A(height - 1, with_pole, to_pole, from_pole)
def _A ( A__ , A__ ):
"""simple docstring"""
print('''moving disk from''' , A__ , '''to''' , A__ )
def _A():
    """Prompt for a tower height and print the full move sequence."""
    # Bug fix: the original bound the height to a throwaway name and then
    # called move_tower with the undefined `A__`.
    height = int(input("Height of hanoi: ").strip())
    # NOTE(review): relies on the sibling `move_tower`; in this dump every
    # function was renamed `_A`, so that sibling name must be restored.
    move_tower(height, "A", "B", "C")


# Restore the name the __main__ guard calls.
main = _A

if __name__ == "__main__":
    main()
| 624 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase_ (lowerCamelCase__ ):
    """Harness exercising a `PretrainedConfig` subclass: common-property
    getters/setters, JSON round-trip, save_pretrained round-trip (including a
    subfolder), num_labels handling, no-arg init, and kwargs init.

    NOTE(review): every method was renamed to SCREAMING_SNAKE_CASE (so later
    defs clobber earlier ones) and every local assignment target to
    `__lowercase` (so names like `config`, `obj`, `config_first` read below
    are undefined); the original names must be restored for this to run.
    """

    def __init__( self : Optional[Any] ,lowercase__ : int ,lowercase__ : List[str]=None ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Dict ):
        # Originally (parent, config_class, has_text_modality,
        # common_properties, **kwargs).
        __lowercase = parent
        __lowercase = config_class
        __lowercase = has_text_modality
        __lowercase = kwargs
        __lowercase = common_properties

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # create_and_test_config_common_properties: the standard transformer
        # size attributes must exist as getters, be settable, and be
        # acceptable as constructor keywords.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = (
            ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(lowercase__ ,lowercase__ ) ,msg=F"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(lowercase__ ):
            try:
                setattr(lowercase__ ,lowercase__ ,lowercase__ )
                self.parent.assertEqual(
                    getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(lowercase__ ):
            try:
                __lowercase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def SCREAMING_SNAKE_CASE ( self : str ):
        # create_and_test_config_to_json_string: every ctor kwarg must survive
        # serialisation to the JSON string form.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] ,lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : int ):
        # create_and_test_config_to_json_file round-trip.
        __lowercase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase = os.path.join(lowercase__ ,'''config.json''' )
            config_first.to_json_file(lowercase__ )
            __lowercase = self.config_class.from_json_file(lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # create_and_test_config_from_and_save_pretrained round-trip.
        __lowercase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(lowercase__ )
            __lowercase = self.config_class.from_pretrained(lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Same round-trip but through a named subfolder.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = '''test'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase = os.path.join(lowercase__ ,lowercase__ )
            config_first.save_pretrained(lowercase__ )
            __lowercase = self.config_class.from_pretrained(lowercase__ ,subfolder=lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # num_labels must drive the sizes of id2label / label2id, including
        # after reassignment.
        __lowercase = self.config_class(**self.inputs_dict ,num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) ,5 )
        self.parent.assertEqual(len(config.labelaid ) ,5 )
        __lowercase = 3
        self.parent.assertEqual(len(config.idalabel ) ,3 )
        self.parent.assertEqual(len(config.labelaid ) ,3 )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # check_config_can_be_init_without_params: composition configs are
        # exempt (they require sub-configs).
        if self.config_class.is_composition:
            return
        __lowercase = self.config_class()
        self.parent.assertIsNotNone(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # check_config_arguments_init: every common kwarg passed to the ctor
        # must actually be set on the instance; collect mismatches and raise
        # a single aggregated error.
        __lowercase = copy.deepcopy(lowercase__ )
        __lowercase = self.config_class(**lowercase__ )
        __lowercase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
            elif getattr(lowercase__ ,lowercase__ ) != value:
                wrong_values.append((key, getattr(lowercase__ ,lowercase__ ), value) )
        if len(lowercase__ ) > 0:
            __lowercase = '''\n'''.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # run_common_tests: execute the whole battery above.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 624 | 1 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Optional deps: torch and Pillow may be absent at collection time.
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Fallback stand-in used when Pillow is missing.
    # NOTE(review): originally this fallback class was named `Image` with a
    # static `open`; the renaming leaves `Image.Image` / `Image.open`
    # references below unresolved when Pillow is absent.
    class lowercase_ :
        """simple docstring"""

        @staticmethod
        def SCREAMING_SNAKE_CASE ( *lowercase__ : Union[str, Any] ,**lowercase__ : Tuple ):
            pass
def _A ( A__ ):
"""simple docstring"""
__lowercase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase_ (unittest.TestCase ):
    """Pipeline tests for depth estimation: small-model harness hooks plus a
    slow integration run against Intel/dpt-large.

    NOTE(review): methods share the renamed name SCREAMING_SNAKE_CASE and some
    take duplicate `lowercase__` parameters (a SyntaxError); restore the
    original names (get_test_pipeline, run_pipeline_test, ...) before running.
    """

    SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : int ):
        # get_test_pipeline: build the pipeline and example inputs.
        __lowercase = DepthEstimationPipeline(model=lowercase__ ,image_processor=lowercase__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ):
        # run_pipeline_test: single image, then a batch covering local files,
        # URL, and RGBA/LA/L color modes; each output has a depth tensor and
        # a PIL depth image.
        __lowercase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,lowercase__ )
        import datasets
        __lowercase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        __lowercase = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,lowercase__ ,)

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Integration: run the real DPT-large model on a COCO image and check
        # min/max of the predicted depth map.
        __lowercase = '''Intel/dpt-large'''
        __lowercase = pipeline('''depth-estimation''' ,model=lowercase__ )
        __lowercase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        __lowercase = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.6_6_2 )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 624 |
'''simple docstring'''
import re
def _A ( A__ ):
"""simple docstring"""
__lowercase = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A__ , A__ ) )
if __name__ == "__main__":
lowerCAmelCase__ = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 624 | 1 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ ):
"""simple docstring"""
return [ord(A__ ) - 96 for elem in plain]
def _A ( A__ ):
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def _A():
    """Read a line, print its 1-26 encoding and the round-trip decoding."""
    # Bug fix: the original bound the encoding to a throwaway name and then
    # printed the undefined `A__`.
    # NOTE(review): relies on siblings `encode` / `decode`; in this dump both
    # were renamed `_A`, so those names must be restored file-wide.
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


# Restore the name the __main__ guard calls.
main = _A

if __name__ == "__main__":
    main()
| 624 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowercase_:
    """A row x column matrix of floats supporting indexing, +, unary -, -, *
    (scalar and matrix), transpose, and the Sherman-Morrison rank-one
    inverse update.

    Bug fixes: the obfuscated original assigned `self.row`/`self.column`/
    `self.array` and every element write to a throwaway name, and renamed
    `validate_indicies`/`transpose`/`sherman_morrison` to one shared method
    name (later defs clobbered earlier ones) while other methods still called
    the original names. All restored here.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Width of the widest element, for right-aligned columns.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """True iff `loc` is an in-range (row, column) pair."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, lowercase_)
        assert self.row == another.row and self.column == another.column
        result = lowercase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = lowercase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # scalar multiplication
            result = lowercase_(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, lowercase_):  # matrix multiplication
            assert self.column == another.row
            result = lowercase_(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = lowercase_(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Given self == A^(-1), return (A + u v^T)^(-1); None if singular.

        `u` and `v` must be column vectors of matching dimension.
        """
        assert isinstance(u, lowercase_) and isinstance(v, lowercase_)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Restore the name the demo code below refers to.
Matrix = lowercase_
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on a 3x3 identity."""
        # Bug fixes: the obfuscated original assigned every element write to a
        # throwaway name (so `ainv`/`u`/`v` were never built), defined both
        # test functions under the same name, and finally called the
        # undefined `testa()`.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test1()
| 624 | 1 |
'''simple docstring'''
from math import loga
def _A ( A__ ):
"""simple docstring"""
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(A__ , A__ ):
raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
def _A ( A__ = 50 ):
"""simple docstring"""
__lowercase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _A ( A__ ):
"""simple docstring"""
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _A ( ):
    """simple docstring"""
    # Entry point for `datasets-cli`: build the argparse tree, register each
    # sub-command, dispatch to the selected command factory and run it.
    # NOTE(review): `allow_abbrev=A__` reads an undefined name (originally
    # `False`), and locals renamed to `__lowercase` leave `parser`,
    # `commands_parser`, `args`, `kwargs` and `service` undefined below.
    __lowercase = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=A__ )
    __lowercase = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(A__ )
    EnvironmentCommand.register_subcommand(A__ )
    TestCommand.register_subcommand(A__ )
    RunBeamCommand.register_subcommand(A__ )
    DummyDataCommand.register_subcommand(A__ )
    # Parse args
    __lowercase , __lowercase = parser.parse_known_args()
    if not hasattr(A__ , '''func''' ):
        # No sub-command selected: show usage and bail out.
        parser.print_help()
        exit(1 )
    __lowercase = parse_unknown_args(A__ )
    # Run
    __lowercase = args.func(A__ , **A__ )
    service.run()


if __name__ == "__main__":
    main()
| 624 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
# Module-level setup: INFO logging, a sample sentence for sanity checks, and
# a lightweight stand-in for the fairseq-style BertAbs config object.
# NOTE(review): all three assignments bind the same obfuscated name
# `lowerCAmelCase__`, so the logger and sample text are overwritten by the
# namedtuple; originally these were `logger`, `SAMPLE_TEXT`, `BertAbsConfig`.
logging.basicConfig(level=logging.INFO)
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = '''Hello world! cécé herlolip'''
lowerCAmelCase__ = namedtuple(
    '''BertAbsConfig''',
    [
        '''temp_dir''',
        '''large''',
        '''use_bert_emb''',
        '''finetune_bert''',
        '''encoder''',
        '''share_emb''',
        '''max_pos''',
        '''enc_layers''',
        '''enc_hidden_size''',
        '''enc_heads''',
        '''enc_ff_size''',
        '''enc_dropout''',
        '''dec_layers''',
        '''dec_hidden_size''',
        '''dec_heads''',
        '''dec_ff_size''',
        '''dec_dropout''',
    ],
)
def _A ( A__ , A__ ):
    """simple docstring"""
    # convert_bertabs_checkpoints(path, pytorch_dump_folder_path): load the
    # authors' BertAbs checkpoint, copy its weights into the transformers
    # implementation, verify both stacks produce (near-)identical outputs,
    # then save the new state dict.
    # NOTE(review): the duplicate `A__` parameters are a SyntaxError, and the
    # locals renamed to `__lowercase` leave `original`, `new_model`,
    # `tokenizer`, the input tensors and every mask name undefined below;
    # the original names must be restored for this to run.
    __lowercase = BertAbsConfig(
        temp_dir='''.''' , finetune_bert=A__ , large=A__ , share_emb=A__ , use_bert_emb=A__ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    # Load on CPU regardless of where the checkpoint was saved.
    __lowercase = torch.load(A__ , lambda A__ , A__ : storage )
    __lowercase = AbsSummarizer(A__ , torch.device('''cpu''' ) , A__ )
    original.eval()
    __lowercase = BertAbsSummarizer(A__ , torch.device('''cpu''' ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''' )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''' )
    __lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    # prepare the model inputs: pad both encoder and decoder ids to 512.
    __lowercase = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
    __lowercase = torch.tensor(A__ ).unsqueeze(0 )
    __lowercase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
    __lowercase = torch.tensor(A__ ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    __lowercase = encoder_input_ids
    __lowercase = decoder_input_ids
    __lowercase = __lowercase = None
    __lowercase = None
    __lowercase = __lowercase = None
    __lowercase = __lowercase = None
    __lowercase = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    __lowercase = original(A__ , A__ , A__ , A__ , A__ , A__ , A__ )[0]
    __lowercase = original.generator(A__ )
    __lowercase = new_model(
        A__ , A__ , A__ , A__ , A__ )[0]
    __lowercase = new_model.generator(A__ )
    # Report the largest elementwise deviation for the stacks and generators.
    __lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(A__ ) )
    __lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(A__ ) )
    __lowercase = torch.allclose(A__ , A__ , atol=1e-3 )
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''' )
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''' )
    torch.save(
        new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
    # CLI: require the official checkpoint path and an output folder, then run
    # the conversion.
    # NOTE(review): both assignments bind the obfuscated `lowerCAmelCase__`
    # (originally `parser` then `args`), so `parser.add_argument` below reads
    # an already-overwritten name once restored incorrectly; original names
    # must be restored for this to run.
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--bertabs_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch dump.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the output PyTorch model.''',
    )
    lowerCAmelCase__ = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 624 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _A(A__):
    """Split Pegasus output into one sentence per line using nltk's Punkt
    tokenizer (needed to get rougeLsum matching published rougeL scores)."""
    # Bug fix: `re.sub` returns the new string — the original discarded the
    # result, so the pegasus "<n>" newline marker was never actually removed.
    A__ = re.sub('''<n>''' , '''''' , A__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(A__ ) )
| 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Optional deps: torch and Pillow may be absent at collection time.
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Fallback stand-in used when Pillow is missing.
    # NOTE(review): originally this fallback class was named `Image` with a
    # static `open`; the renaming leaves `Image.Image` / `Image.open`
    # references below unresolved when Pillow is absent.
    class lowercase_ :
        """simple docstring"""

        @staticmethod
        def SCREAMING_SNAKE_CASE ( *lowercase__ : Union[str, Any] ,**lowercase__ : Tuple ):
            pass
def _A ( A__ ):
"""simple docstring"""
__lowercase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase_ (unittest.TestCase ):
    """Pipeline tests for depth estimation: small-model harness hooks plus a
    slow integration run against Intel/dpt-large.

    NOTE(review): methods share the renamed name SCREAMING_SNAKE_CASE and some
    take duplicate `lowercase__` parameters (a SyntaxError); restore the
    original names (get_test_pipeline, run_pipeline_test, ...) before running.
    """

    SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : int ):
        # get_test_pipeline: build the pipeline and example inputs.
        __lowercase = DepthEstimationPipeline(model=lowercase__ ,image_processor=lowercase__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ):
        # run_pipeline_test: single image, then a batch covering local files,
        # URL, and RGBA/LA/L color modes.
        __lowercase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,lowercase__ )
        import datasets
        __lowercase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        __lowercase = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,lowercase__ ,)

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Integration: run DPT-large on a COCO image and check depth min/max.
        __lowercase = '''Intel/dpt-large'''
        __lowercase = pipeline('''depth-estimation''' ,model=lowercase__ )
        __lowercase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        __lowercase = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.6_6_2 )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 624 | 1 |
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _A ( A__ ):
"""simple docstring"""
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(A__ , A__ , bias=A__ )
__lowercase = emb.weight.data
return lin_layer
def _A ( A__ ):
    """simple docstring"""
    # convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path): load a
    # fairseq XGLM checkpoint, strip fairseq-only keys, map the key prefix
    # decoder->model, build a matching XGLMConfig, load the weights
    # (non-strict) and tie the LM head to the embeddings.
    # NOTE(review): locals renamed to `__lowercase` leave `checkpoint`,
    # `args`, `state_dict`, `vocab_size` and `model` undefined on the lines
    # that read them; the original names must be restored for this to run.
    __lowercase = torch.load(A__ , map_location='''cpu''' )
    __lowercase = Namespace(**checkpoint['''cfg''']['''model'''] )
    __lowercase = checkpoint['''model''']
    remove_ignore_keys_(A__ )
    __lowercase = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    __lowercase = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    __lowercase = XGLMConfig(
        vocab_size=A__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    __lowercase = XGLMForCausalLM(A__ )
    # strict=False: report (but tolerate) missing/unexpected keys.
    __lowercase = model.load_state_dict(A__ , strict=A__ )
    print(A__ )
    __lowercase = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # CLI: positional fairseq checkpoint path and output folder.
    # NOTE(review): the three `lowerCAmelCase__` assignments were originally
    # `parser`, `args` and `model`; as renamed, `parser.add_argument` and
    # `args.fairseq_path` read undefined/overwritten names.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    lowerCAmelCase__ = parser.parse_args()
    lowerCAmelCase__ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 624 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(A__ ):
__lowercase = y[k] + step_size * ode_func(A__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ ):
    """Configuration class for the MRA model (uw-madison/mra-base-512-4):
    standard BERT-style size hyperparameters plus the MRA-specific
    block/approximation settings.

    NOTE(review): the duplicate `lowercase__` parameter names in __init__ are
    a SyntaxError, and every attribute assignment binds `__lowercase` instead
    of `self.<attr>`; the original parameter/attribute names (vocab_size,
    hidden_size, ..., approx_mode) must be restored for this to work.
    """

    SCREAMING_SNAKE_CASE : List[str] = 'mra'

    def __init__( self : str ,lowercase__ : Optional[int]=5_0_2_6_5 ,lowercase__ : List[str]=7_6_8 ,lowercase__ : Union[str, Any]=1_2 ,lowercase__ : str=1_2 ,lowercase__ : Tuple=3_0_7_2 ,lowercase__ : str="gelu" ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : List[str]=0.1 ,lowercase__ : Dict=5_1_2 ,lowercase__ : Any=1 ,lowercase__ : int=0.0_2 ,lowercase__ : Optional[int]=1e-5 ,lowercase__ : Optional[Any]="absolute" ,lowercase__ : List[str]=4 ,lowercase__ : Dict="full" ,lowercase__ : Tuple=0 ,lowercase__ : Optional[Any]=0 ,lowercase__ : List[str]=1 ,lowercase__ : Any=0 ,lowercase__ : Any=2 ,**lowercase__ : int ,):
        # Special token ids are forwarded to the PretrainedConfig base.
        super().__init__(pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,**lowercase__ )
        # Standard transformer hyperparameters.
        __lowercase = vocab_size
        __lowercase = max_position_embeddings
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = initializer_range
        __lowercase = type_vocab_size
        __lowercase = layer_norm_eps
        __lowercase = position_embedding_type
        # MRA-specific attention approximation settings.
        __lowercase = block_per_row
        __lowercase = approx_mode
        __lowercase = initial_prior_first_n_blocks
        __lowercase = initial_prior_diagonal_n_blocks
| 624 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
__lowercase = sum(A__ ) / len(A__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(A__ )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 624 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(A__ ):
__lowercase = y[k] + step_size * ode_func(A__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 624 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
# Metric-card text blocks.  Restored to the names the decorator and metric
# class below actually read (``_DESCRIPTION``, ``_KWARGS_DESCRIPTION``,
# ``_CITATION``); previously all three were bound to one mangled name,
# leaving those reads undefined.
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {\'spearmanr\': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results[\'spearmanr\'])
        -0.7
        >>> print(round(results[\'spearmanr_pvalue\'], 2))
        0.19
'''

_CITATION = R'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
            Haberland, Matt and Reddy, Tyler and Cournapeau, David and
            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
            Kern, Robert and Larson, Eric and Carey, C J and
            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
            Harris, Charles R. and Archibald, Anne M. and
            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
            Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''

# Preserve the original module-level binding (its final value was the citation).
lowerCAmelCase__ = _CITATION
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase_ (datasets.Metric ):
    """Spearman rank-order correlation metric backed by ``scipy.stats.spearmanr``.

    Fixes applied: the two methods previously shared a single mangled name
    (the second silently shadowed the first) and ``_compute`` both declared
    duplicate parameter names (a SyntaxError) and read an unbound ``results``.
    ``datasets.Metric`` dispatches to ``_info`` / ``_compute``.
    """

    def _info(self):
        # Declares the metric schema and points at the SciPy reference docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                } ),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # scipy returns a (correlation, pvalue) pair.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 624 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase_ :
    """Builds tiny BeiT configs/inputs and runs shape checks for the unit tests.

    NOTE(review): parameter and local names throughout this class appear
    mechanically mangled (duplicate ``lowercase__`` parameters, ``__lowercase``
    rebound repeatedly while later lines read ``parent``/``config``/``model``
    etc.) — confirm the intended bindings against the upstream BeiT test file.
    """
    def __init__( self : Any ,lowercase__ : List[Any] ,lowercase__ : Tuple=1_0_0 ,lowercase__ : str=1_3 ,lowercase__ : int=3_0 ,lowercase__ : Optional[int]=2 ,lowercase__ : Union[str, Any]=3 ,lowercase__ : List[str]=True ,lowercase__ : str=True ,lowercase__ : List[Any]=3_2 ,lowercase__ : List[Any]=4 ,lowercase__ : List[str]=4 ,lowercase__ : Tuple=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : int=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[int]=1_0 ,lowercase__ : Optional[int]=0.0_2 ,lowercase__ : Dict=3 ,lowercase__ : List[str]=None ,lowercase__ : int=[0, 1, 2, 3] ,):
        # Stores the tester knobs (batch size, image/patch sizes, model dims, ...).
        __lowercase = parent
        __lowercase = 1_0_0
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = patch_size
        __lowercase = num_channels
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = type_sequence_label_size
        __lowercase = initializer_range
        __lowercase = scope
        __lowercase = out_indices
        __lowercase = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase = (image_size // patch_size) ** 2
        __lowercase = num_patches + 1
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Produces random pixel values plus (optionally) classification and
        # per-pixel segmentation labels.
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Builds a BeitConfig from the tester attributes.
        return BeitConfig(
            vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
        # Checks the base model's last_hidden_state shape.
        __lowercase = BeitModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ):
        # Checks the masked-image-modeling head's logits shape.
        __lowercase = BeitForMaskedImageModeling(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
        # Checks the classification head, including single-channel inputs.
        __lowercase = self.type_sequence_label_size
        __lowercase = BeitForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __lowercase = 1
        __lowercase = BeitForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __lowercase = model(lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any] ):
        # Checks the semantic-segmentation head with and without labels.
        __lowercase = self.num_labels
        __lowercase = BeitForSemanticSegmentation(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        __lowercase = model(lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Packs (config, {"pixel_values": ...}) for the common-test machinery.
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common BeiT model tests (config, embeddings, forward signature, training).

    NOTE(review): many local bindings here are mangled (``__lowercase`` assigned
    while later lines read ``model``/``config``/``signature`` etc.); the intent
    matches the standard transformers common-test template — confirm upstream.
    """
    # Model classes / pipeline mapping exercised by the common tests.
    SCREAMING_SNAKE_CASE : Dict = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE : Tuple = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : List[str] = False
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Dict = False
    def SCREAMING_SNAKE_CASE ( self : int ):
        # Test fixtures: model tester + config tester.
        __lowercase = BeitModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''BEiT does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        pass
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Input embeddings must be an nn.Module; output embeddings Linear or None.
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            __lowercase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase__ ,nn.Linear ) )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # forward() must take pixel_values as its first argument.
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Training smoke test: loss must backprop for trainable model classes.
        if not self.model_tester.is_training:
            return
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(lowercase__ ), BeitForMaskedImageModeling]:
                continue
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.train()
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            __lowercase = model(**lowercase__ ).loss
            loss.backward()
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Same as above, with gradient checkpointing enabled.
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        __lowercase = False
        __lowercase = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(lowercase__ ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            __lowercase = model_class(lowercase__ )
            model.gradient_checkpointing_enable()
            model.to(lowercase__ )
            model.train()
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            __lowercase = model(**lowercase__ ).loss
            loss.backward()
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # With zero-init config, parameters should initialize to 0.0 or 1.0.
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = _config_zero_init(lowercase__ )
        for model_class in self.all_model_classes:
            __lowercase = model_class(config=lowercase__ )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Smoke-test loading a pretrained checkpoint from the hub.
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = BeitModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _A():
    """Load the standard COCO cats fixture image used by the integration tests.

    Returns:
        A PIL image opened from the repository's test fixtures directory.
    """
    # Fixed: the opened image was bound to a throwaway name while ``image``
    # (returned below) was never defined.
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
    """Slow BeiT integration tests against pretrained hub checkpoints.

    NOTE(review): local bindings are mangled throughout (``__lowercase``
    assigned, later lines read ``model``/``logits``/``outputs`` etc., and
    ``.to(lowercase__ )`` presumably targeted ``torch_device``) — confirm
    against the upstream transformers integration tests.
    """
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Shared image processor fixture (None when vision deps are missing).
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Masked-image-modeling head: check logits shape and a 3x3 slice.
        __lowercase = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowercase__ )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).pixel_values.to(lowercase__ )
        # prepare bool_masked_pos
        __lowercase = torch.ones((1, 1_9_6) ,dtype=torch.bool ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(pixel_values=lowercase__ ,bool_masked_pos=lowercase__ )
        __lowercase = outputs.logits
        # verify the logits
        __lowercase = torch.Size((1, 1_9_6, 8_1_9_2) )
        self.assertEqual(logits.shape ,lowercase__ )
        __lowercase = torch.tensor(
            [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(lowercase__ )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,lowercase__ ,atol=1e-2 ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # ImageNet-1k classification head: shape, slice, and argmax class (281).
        __lowercase = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowercase__ )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        __lowercase = outputs.logits
        # verify the logits
        __lowercase = torch.Size((1, 1_0_0_0) )
        self.assertEqual(logits.shape ,lowercase__ )
        __lowercase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(lowercase__ )
        self.assertTrue(torch.allclose(logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
        __lowercase = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item() ,lowercase__ )
    @slow
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # ImageNet-22k classification head: shape, slice, and argmax class (2396).
        __lowercase = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
            lowercase__ )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        __lowercase = outputs.logits
        # verify the logits
        __lowercase = torch.Size((1, 2_1_8_4_1) )
        self.assertEqual(logits.shape ,lowercase__ )
        __lowercase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(lowercase__ )
        self.assertTrue(torch.allclose(logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
        __lowercase = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item() ,lowercase__ )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # ADE20k semantic segmentation: logits shape and Pillow-version-dependent slice.
        __lowercase = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
        __lowercase = model.to(lowercase__ )
        __lowercase = BeitImageProcessor(do_resize=lowercase__ ,size=6_4_0 ,do_center_crop=lowercase__ )
        __lowercase = load_dataset('''hf-internal-testing/fixtures_ade20k''' ,split='''test''' )
        __lowercase = Image.open(ds[0]['''file'''] )
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        __lowercase = outputs.logits
        # verify the logits
        __lowercase = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
        self.assertEqual(logits.shape ,lowercase__ )
        __lowercase = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
        if is_pillow_less_than_a:
            __lowercase = torch.tensor(
                [
                    [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
                    [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
                    [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
                ] ,device=lowercase__ ,)
        else:
            __lowercase = torch.tensor(
                [
                    [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
                    [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
                    [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
                ] ,device=lowercase__ ,)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,lowercase__ ,atol=1e-4 ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Post-processing: segmentation maps with and without target_sizes.
        __lowercase = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
        __lowercase = model.to(lowercase__ )
        __lowercase = BeitImageProcessor(do_resize=lowercase__ ,size=6_4_0 ,do_center_crop=lowercase__ )
        __lowercase = load_dataset('''hf-internal-testing/fixtures_ade20k''' ,split='''test''' )
        __lowercase = Image.open(ds[0]['''file'''] )
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        __lowercase = outputs.logits.detach().cpu()
        __lowercase = image_processor.post_process_semantic_segmentation(outputs=lowercase__ ,target_sizes=[(5_0_0, 3_0_0)] )
        __lowercase = torch.Size((5_0_0, 3_0_0) )
        self.assertEqual(segmentation[0].shape ,lowercase__ )
        __lowercase = image_processor.post_process_semantic_segmentation(outputs=lowercase__ )
        __lowercase = torch.Size((1_6_0, 1_6_0) )
        self.assertEqual(segmentation[0].shape ,lowercase__ )
| 624 |
'''simple docstring'''
import random
from typing import Any
def _A ( A__ ):
"""simple docstring"""
for _ in range(len(A__ ) ):
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase , __lowercase = data[b], data[a]
return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings and print both.
    lowerCAmelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
    lowerCAmelCase__ = ['''python''', '''says''', '''hello''', '''!''']
    # NOTE(review): both literals are bound to the same mangled name but read
    # back as ``integers``/``strings``, and ``fisher_yates_shuffle`` is called
    # while the function above is named ``_A`` — confirm the original bindings.
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 624 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
    """Zero-shot audio classification pipeline (PyTorch only).

    Scores an audio clip against free-form candidate labels by filling each
    label into a hypothesis template and comparing audio/text embeddings.

    NOTE(review): local and parameter names are mechanically mangled here
    (duplicate ``lowercase__`` arguments, ``isinstance(lowercase__ ,lowercase__ )``,
    reads of ``audio``/``kwargs``/``inputs`` that were never bound) — confirm
    the intended bindings against the upstream pipeline implementation.
    """
    def __init__( self : Union[str, Any] ,**lowercase__ : Optional[Any] ):
        super().__init__(**lowercase__ )
        if self.framework != "pt":
            raise ValueError(F"The {self.__class__} is only available in PyTorch." )
        # No specific FOR_XXX available yet
    def __call__( self : Optional[int] ,lowercase__ : Union[np.ndarray, bytes, str] ,**lowercase__ : int ):
        # Accepts a URL/path, raw bytes, or a 1-D waveform array.
        return super().__call__(lowercase__ ,**lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : str ,**lowercase__ : Tuple ):
        # Splits user kwargs into (preprocess, forward, postprocess) params.
        __lowercase = {}
        if "candidate_labels" in kwargs:
            __lowercase = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            __lowercase = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any]=None ,lowercase__ : str="This is a sound of {}." ):
        # Resolves the audio source to a waveform, then builds audio features
        # and tokenized hypothesis sentences for every candidate label.
        if isinstance(lowercase__ ,lowercase__ ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                __lowercase = requests.get(lowercase__ ).content
            else:
                with open(lowercase__ ,'''rb''' ) as f:
                    __lowercase = f.read()
        if isinstance(lowercase__ ,lowercase__ ):
            __lowercase = ffmpeg_read(lowercase__ ,self.feature_extractor.sampling_rate )
        if not isinstance(lowercase__ ,np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        __lowercase = self.feature_extractor(
            [audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors='''pt''' )
        __lowercase = candidate_labels
        __lowercase = [hypothesis_template.format(lowercase__ ) for x in candidate_labels]
        __lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework ,padding=lowercase__ )
        __lowercase = [text_inputs]
        return inputs
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ):
        # Runs the model and keeps the per-audio logits with their labels.
        __lowercase = model_inputs.pop('''candidate_labels''' )
        __lowercase = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] ,lowercase__ ):
            __lowercase = text_inputs[0]
        else:
            # Batching case.
            __lowercase = text_inputs[0][0]
        __lowercase = self.model(**lowercase__ ,**lowercase__ )
        __lowercase = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ):
        # Softmaxes the logits and returns labels sorted by descending score.
        __lowercase = model_outputs.pop('''candidate_labels''' )
        __lowercase = model_outputs['''logits'''][0]
        if self.framework == "pt":
            __lowercase = logits.softmax(dim=0 )
            __lowercase = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        __lowercase = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(lowercase__ ,lowercase__ ) ,key=lambda lowercase__ : -x[0] )
        ]
        return result
| 624 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Script flags: originally do_only_config / do_only_renaming / do_only_weights
# (read under those names below); here all three are bound to one mangled name.
# NOTE(review): most bindings in this script are mangled (``lowerCAmelCase__``
# assigned, read back as ``parser``/``args``/``config``/``model`` etc.) —
# confirm against the original diffusers conversion script.
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = False
if __name__ == "__main__":
    # CLI: rewrite an old-style UNet repo (config keys, submodule names,
    # and/or weight keys) into the current diffusers layout.
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--repo_path''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    lowerCAmelCase__ = parser.parse_args()
    # Old config key -> new config key.
    lowerCAmelCase__ = {
        '''image_size''': '''sample_size''',
        '''num_res_blocks''': '''layers_per_block''',
        '''block_channels''': '''block_out_channels''',
        '''down_blocks''': '''down_block_types''',
        '''up_blocks''': '''up_block_types''',
        '''downscale_freq_shift''': '''freq_shift''',
        '''resnet_num_groups''': '''norm_num_groups''',
        '''resnet_act_fn''': '''act_fn''',
        '''resnet_eps''': '''norm_eps''',
        '''num_head_channels''': '''attention_head_dim''',
    }
    # Old state-dict key prefix -> new submodule name.
    lowerCAmelCase__ = {
        '''time_steps''': '''time_proj''',
        '''mid''': '''mid_block''',
        '''downsample_blocks''': '''down_blocks''',
        '''upsample_blocks''': '''up_blocks''',
    }
    # Configs may live at the repo root or under an ``unet`` subfolder.
    lowerCAmelCase__ = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        lowerCAmelCase__ = reader.read()
    lowerCAmelCase__ = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, '''config.json'''):
        lowerCAmelCase__ = UNetaDModel(**config)
    else:
        lowerCAmelCase__ = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
        lowerCAmelCase__ = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    lowerCAmelCase__ = dict(model.config)
    if do_only_renaming:
        # Move values from old config keys to their renamed counterparts.
        for key, value in config_parameters_to_change.items():
            if key in config:
                lowerCAmelCase__ = config[key]
                del config[key]
    lowerCAmelCase__ = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
    lowerCAmelCase__ = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
    if do_only_weights:
        # Rewrite state-dict keys, dropping legacy ``.op`` parameters.
        lowerCAmelCase__ = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        lowerCAmelCase__ = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
                continue
            lowerCAmelCase__ = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('''.''')[0] == key:
                    lowerCAmelCase__ = param_value
                    lowerCAmelCase__ = True
            if not has_changed:
                lowerCAmelCase__ = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 624 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# Module constants for the Speech2Text tokenizer.  Restored to the names the
# tokenizer class below actually reads (``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP``, ``MAX_MODEL_INPUT_SIZES``, ``MUSTC_LANGS``,
# ``LANGUAGES``); previously every constant was bound to one mangled name,
# leaving those reads undefined.

# SentencePiece marker denoting a word boundary.
SPIECE_UNDERLINE = '''▁'''

# File names the tokenizer saves/loads.
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
}

# Hub locations of the pretrained vocab/spm files.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
        ),
    },
    '''spm_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
        )
    },
}

# Maximum input length per pretrained checkpoint.
MAX_MODEL_INPUT_SIZES = {
    '''facebook/s2t-small-librispeech-asr''': 1024,
}

# Target languages of the MuST-C checkpoints.
MUSTC_LANGS = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']

# Supported language sets, keyed by corpus code.
LANGUAGES = {'''mustc''': MUSTC_LANGS}

# Preserve the original module-level binding (its final value was LANGUAGES).
lowerCAmelCase__ = LANGUAGES
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : int = MAX_MODEL_INPUT_SIZES
SCREAMING_SNAKE_CASE : int = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE : List[int] = []
    def __init__( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple="<s>" ,lowercase__ : List[str]="</s>" ,lowercase__ : str="<pad>" ,lowercase__ : Optional[Any]="<unk>" ,lowercase__ : Dict=False ,lowercase__ : List[Any]=False ,lowercase__ : int=None ,lowercase__ : Dict=None ,lowercase__ : Optional[Dict[str, Any]] = None ,**lowercase__ : Optional[Any] ,):
        # Loads the json vocab and the SentencePiece model; when lang_codes is
        # given, also registers <lang:xx> tokens and the target language.
        # NOTE(review): parameter and attribute bindings are mangled here
        # (duplicate ``lowercase__`` params; ``__lowercase`` assigned while later
        # lines read ``self.encoder``/``self.langs`` etc.) — confirm upstream.
        __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,pad_token=lowercase__ ,do_upper_case=lowercase__ ,do_lower_case=lowercase__ ,tgt_lang=lowercase__ ,lang_codes=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase__ ,)
        __lowercase = do_upper_case
        __lowercase = do_lower_case
        __lowercase = load_json(lowercase__ )
        __lowercase = {v: k for k, v in self.encoder.items()}
        __lowercase = spm_file
        __lowercase = load_spm(lowercase__ ,self.sp_model_kwargs )
        if lang_codes is not None:
            __lowercase = lang_codes
            __lowercase = LANGUAGES[lang_codes]
            __lowercase = [F"<lang:{lang}>" for lang in self.langs]
            __lowercase = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs}
            __lowercase = self.lang_tokens
            __lowercase = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            __lowercase = {}
    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Vocabulary size: number of entries in the loaded vocab json.
        return len(self.encoder )
@property
def SCREAMING_SNAKE_CASE ( self : int ):
return self._tgt_lang
@tgt_lang.setter
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : str ):
__lowercase = new_tgt_lang
self.set_tgt_lang_special_tokens(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ):
__lowercase = self.lang_code_to_id[tgt_lang]
__lowercase = [lang_code_id]
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ):
return self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ):
return self.encoder.get(lowercase__ ,self.encoder[self.unk_token] )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ):
return self.decoder.get(lowercase__ ,self.unk_token )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ):
__lowercase = []
__lowercase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__lowercase = self.sp_model.decode(lowercase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__lowercase = []
else:
current_sub_tokens.append(lowercase__ )
__lowercase = self.sp_model.decode(lowercase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[int]=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
__lowercase = [1] * len(self.prefix_tokens )
__lowercase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Tuple ,lowercase__ : Dict ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = load_spm(self.spm_file ,self.sp_model_kwargs )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
__lowercase = Path(lowercase__ )
assert save_dir.is_dir(), F"{save_directory} should be a directory"
__lowercase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
__lowercase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder ,lowercase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,lowercase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (str(lowercase__ ), str(lowercase__ ))
def _A ( path , sp_model_kwargs ):
    """Load a serialized sentencepiece model.

    The original (obfuscated) version declared two parameters both named
    ``A__`` (a SyntaxError) and returned the undefined name ``spm``; this
    restores distinct parameter names and a single local.

    Args:
        path: filesystem path of the serialized sentencepiece model.
        sp_model_kwargs: keyword arguments forwarded to
            ``sentencepiece.SentencePieceProcessor``.

    Returns:
        The loaded ``SentencePieceProcessor`` instance.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def _A ( A__ ):
"""simple docstring"""
with open(A__ , '''r''' ) as f:
return json.load(A__ )
def _A ( A__ , A__ ):
"""simple docstring"""
with open(A__ , '''w''' ) as f:
json.dump(A__ , A__ , indent=2 )
| 624 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ (lowerCamelCase__ ):
    """Cvt-specific ConfigTester extension.

    Checks that a config built from ``inputs_dict`` exposes the Cvt
    attributes ``embed_dim`` and ``num_heads``.
    """

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # The original assigned the config to an obfuscated name and then
        # read the undefined name `lowercase__`; bind one local and reuse it.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,'''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config ,'''num_heads''' ) )
class lowercase_ :
    """Fixture builder for the Cvt model tests.

    Prepares configs, random pixel inputs and labels, and runs output-shape
    checks for ``CvtModel`` and ``CvtForImageClassification``.

    NOTE(review): machine-obfuscated — signatures repeat the parameter name
    ``lowercase__`` (a SyntaxError) and locals are assigned to ``__lowercase``
    while later code reads the original names; not runnable as written.
    """

    def __init__( self : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int]=1_3 ,lowercase__ : List[Any]=6_4 ,lowercase__ : Optional[int]=3 ,lowercase__ : Dict=[1_6, 4_8, 9_6] ,lowercase__ : Optional[Any]=[1, 3, 6] ,lowercase__ : Tuple=[1, 2, 1_0] ,lowercase__ : Optional[int]=[7, 3, 3] ,lowercase__ : str=[4, 2, 2] ,lowercase__ : Dict=[2, 1, 1] ,lowercase__ : Tuple=[2, 2, 2] ,lowercase__ : Tuple=[False, False, True] ,lowercase__ : int=[0.0, 0.0, 0.0] ,lowercase__ : str=0.0_2 ,lowercase__ : Union[str, Any]=1e-1_2 ,lowercase__ : Optional[int]=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[Any]=2 ,):
        # Presumed parameter mapping (by default values): batch_size=13,
        # image_size=64, num_channels=3, embed_dim=[16,48,96],
        # num_heads=[1,3,6], depth=[1,2,10], patch_sizes=[7,3,3],
        # patch_stride=[4,2,2], patch_padding=[2,1,1], stride_kv=[2,2,2],
        # cls_token=[False,False,True], attention_drop_rate=[0,0,0],
        # initializer_range=0.02, layer_norm_eps=1e-12, is_training=True,
        # use_labels=True, num_labels=2 — TODO confirm against the
        # unobfuscated original.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = patch_sizes
        __lowercase = patch_stride
        __lowercase = patch_padding
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = num_labels
        __lowercase = num_channels
        __lowercase = embed_dim
        __lowercase = num_heads
        __lowercase = stride_kv
        __lowercase = depth
        __lowercase = cls_token
        __lowercase = attention_drop_rate
        __lowercase = initializer_range
        __lowercase = layer_norm_eps

    # Random pixel batch plus (optional) labels and a fresh config.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels

    # Build a CvtConfig from the tester's hyperparameters.
    def SCREAMING_SNAKE_CASE ( self : str ):
        return CvtConfig(
            image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)

    # Run CvtModel and assert the last hidden state has the shape implied by
    # the per-stage conv patch embedding arithmetic.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[str] ):
        __lowercase = CvtModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        __lowercase = (self.image_size, self.image_size)
        __lowercase , __lowercase = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            # standard conv output-size formula per stage
            __lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            __lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )

    # Run CvtForImageClassification and check the logits shape.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Dict ):
        __lowercase = self.num_labels
        __lowercase = CvtForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    # Adapter used by the common-test mixin: config + model kwargs dict.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common model/pipeline test suite for Cvt.

    NOTE(review): machine-obfuscated — the class attributes are all bound to
    the same name ``SCREAMING_SNAKE_CASE`` (intended: all_model_classes,
    pipeline_model_mapping, and the test_* feature flags) and locals are
    assigned to ``__lowercase`` while later code reads the original names.
    """

    SCREAMING_SNAKE_CASE : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE : Optional[int] = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : Optional[int] = False
    SCREAMING_SNAKE_CASE : Union[str, Any] = False
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : Optional[Any] = False
    SCREAMING_SNAKE_CASE : str = False

    # setUp: build the model tester and a text-free config tester.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = CvtModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )

    # test_config: run the standard config round-trip checks.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # Intentionally a no-op (common-properties check handled elsewhere).
    def SCREAMING_SNAKE_CASE ( self : str ):
        return

    @unittest.skip(reason='''Cvt does not output attentions''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE ( self : str ):
        pass

    # forward() must take pixel_values as its first argument.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )

    # Hidden states: one per stage, first with embed_dim[0] channels at 1/4
    # spatial resolution; checked both via kwarg and via config flag.
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        def check_hidden_states_output(lowercase__ : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ):
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            with torch.no_grad():
                __lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
            __lowercase = outputs.hidden_states
            __lowercase = len(self.model_tester.depth )
            self.assertEqual(len(lowercase__ ) ,lowercase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,[
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] ,)

        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase__ )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass

    # Smoke test: the first published checkpoint loads from the hub.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = CvtModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def _A ( ):
    """Load the COCO cats fixture image used by the integration tests.

    Returns:
        The opened PIL image.
    """
    # The original assigned the image to an obfuscated name but returned the
    # undefined name `image`; return the opened image directly instead.
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
    """Slow integration test: run the published Cvt checkpoint on the COCO
    fixture image and compare the first logits against recorded values.

    NOTE(review): machine-obfuscated — locals are assigned to ``__lowercase``
    while later code reads the original names; not runnable as written.
    """

    # Image processor matching the first published checkpoint.
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase__ )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        # verify the logits
        __lowercase = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape ,lowercase__ )
        __lowercase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(lowercase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
| 624 | 1 |
'''simple docstring'''
from math import isqrt
def _A ( A__ ):
"""simple docstring"""
__lowercase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , A__ , A__ ):
__lowercase = False
return [i for i in range(2 , A__ ) if is_prime[i]]
def _A ( A__ = 10**8 ):
"""simple docstring"""
__lowercase = calculate_prime_numbers(max_number // 2 )
__lowercase = 0
__lowercase = 0
__lowercase = len(A__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f'{solution() = }')
| 624 |
'''simple docstring'''
def _A ( ):
"""simple docstring"""
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def _A ( A__ ):
"""simple docstring"""
__lowercase = 1
__lowercase = 2
while i * i <= n:
__lowercase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _A ( ):
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(A__ ) > 500 )
if __name__ == "__main__":
print(solution())
| 624 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = np.shape(A__ )
if rows != columns:
__lowercase = (
'''\'table\' has to be of square shaped array but got a '''
F"{rows}x{columns} array:\n{table}"
)
raise ValueError(A__ )
__lowercase = np.zeros((rows, columns) )
__lowercase = np.zeros((rows, columns) )
for i in range(A__ ):
for j in range(A__ ):
__lowercase = sum(lower[i][k] * upper[k][j] for k in range(A__ ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
__lowercase = (table[i][j] - total) / upper[j][j]
__lowercase = 1
for j in range(A__ , A__ ):
__lowercase = sum(lower[i][k] * upper[k][j] for k in range(A__ ) )
__lowercase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase_ :
    """Fixture builder for the TF Funnel model tests.

    Prepares configs, random token inputs/masks/labels, and runs shape checks
    for every TFFunnel head.  With ``base=True`` it targets
    ``TFFunnelBaseModel`` (encoder blocks only); otherwise the full model
    with its upsampling decoder.

    NOTE(review): machine-obfuscated — signatures repeat the parameter name
    ``lowercase__`` (a SyntaxError) and locals are assigned to ``__lowercase``
    while later code reads the original names; not runnable as written.
    """

    def __init__( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any]=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[Any]=True ,lowercase__ : str=True ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Tuple=9_9 ,lowercase__ : List[str]=[1, 1, 2] ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=3_2 ,lowercase__ : int=4 ,lowercase__ : Tuple=8 ,lowercase__ : Tuple=3_7 ,lowercase__ : str="gelu_new" ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[Any]=0.0 ,lowercase__ : Tuple=5_1_2 ,lowercase__ : Any=3 ,lowercase__ : List[str]=0.0_2 ,lowercase__ : List[Any]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : Tuple=False ,):
        # Presumed parameter mapping (by default values): batch_size=13,
        # seq_length=7, four booleans (is_training, use_input_mask,
        # use_token_type_ids, use_labels), vocab_size=99, block_sizes=[1,1,2],
        # num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37,
        # hidden_act="gelu_new", dropouts, max_position_embeddings=512,
        # type_vocab_size=3, initializer_std=0.02, num_labels=3,
        # num_choices=4, scope=None, base=False — TODO confirm against the
        # unobfuscated original.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_mask
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = block_sizes
        __lowercase = num_decoder_layers
        __lowercase = d_model
        __lowercase = n_head
        __lowercase = d_head
        __lowercase = d_inner
        __lowercase = hidden_act
        __lowercase = hidden_dropout
        __lowercase = attention_dropout
        __lowercase = activation_dropout
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = 2
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope
        __lowercase = initializer_std
        # Used in the tests to check the size of the first attention layer
        __lowercase = n_head
        # Used in the tests to check the size of the first hidden state
        __lowercase = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        __lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            __lowercase = self.num_hidden_layers + 2

    # Random input ids, optional masks/type ids/labels, and a FunnelConfig.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        __lowercase = None
        if self.use_input_mask:
            __lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase = None
        if self.use_token_type_ids:
            __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            __lowercase = ids_tensor([self.batch_size] ,self.num_choices )
        __lowercase = FunnelConfig(
            vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    # TFFunnelModel: full-length hidden states; also exercised with
    # untie_r/separate_cls style config toggles flipped off.
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,):
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        __lowercase = [input_ids, input_mask]
        __lowercase = model(lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )

    # TFFunnelBaseModel: pooled output length depends on the config toggle
    # (2 vs 3 positions), hence the differing expected shapes below.
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,):
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        __lowercase = [input_ids, input_mask]
        __lowercase = model(lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )

    # ELECTRA-style pretraining head: per-token binary logits.
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : List[str] ,):
        __lowercase = TFFunnelForPreTraining(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )

    # MLM head: per-token vocabulary logits.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,):
        __lowercase = TFFunnelForMaskedLM(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    # Sequence classification head: one logit vector per example.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,):
        __lowercase = self.num_labels
        __lowercase = TFFunnelForSequenceClassification(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    # Multiple choice: inputs are tiled across the choice dimension.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,):
        __lowercase = self.num_choices
        __lowercase = TFFunnelForMultipleChoice(config=lowercase__ )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )

    # Token classification head: per-token label logits.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,):
        __lowercase = self.num_labels
        __lowercase = TFFunnelForTokenClassification(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )

    # QA head: per-token start/end logits.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Any ,):
        __lowercase = TFFunnelForQuestionAnswering(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )

    # Adapter used by the common-test mixin: config + model kwargs dict.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        __lowercase = self.prepare_config_and_inputs()
        (
            (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) ,
        ) = config_and_inputs
        __lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common model/pipeline test suite for the full TF Funnel model and its
    token-level heads.

    NOTE(review): machine-obfuscated — the class attributes are all bound to
    the same name ``SCREAMING_SNAKE_CASE`` (intended: all_model_classes,
    pipeline_model_mapping, and test flags) and locals are assigned to
    ``__lowercase`` while later code reads the original names.
    """

    SCREAMING_SNAKE_CASE : int = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Any = False

    # setUp: model tester for the full (decoder-bearing) variant.
    def SCREAMING_SNAKE_CASE ( self : str ):
        __lowercase = TFFunnelModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : str ):
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : int ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
    """Common test suite for the base (encoder-only) TF Funnel variant and
    its sequence-level heads.

    NOTE(review): machine-obfuscated in the same way as the suite above —
    repeated attribute names and ``__lowercase``/original-name mismatches.
    """

    SCREAMING_SNAKE_CASE : List[Any] = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    SCREAMING_SNAKE_CASE : Dict = False
    SCREAMING_SNAKE_CASE : List[str] = False

    # setUp: model tester in base mode (no decoder layers counted).
    def SCREAMING_SNAKE_CASE ( self : str ):
        __lowercase = TFFunnelModelTester(self ,base=lowercase__ )
        __lowercase = ConfigTester(self ,config_class=lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
| 624 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Plain-English language name -> FLORES-200 code used by the NLLB-200 checkpoint.
# Restored name: the tool class below reads the module-level ``LANGUAGE_CODES``,
# but the original assigned this dict to a mangled throwaway name.
LANGUAGE_CODES = {
    '''Acehnese Arabic''': '''ace_Arab''',
    '''Acehnese Latin''': '''ace_Latn''',
    '''Mesopotamian Arabic''': '''acm_Arab''',
    '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
    '''Tunisian Arabic''': '''aeb_Arab''',
    '''Afrikaans''': '''afr_Latn''',
    '''South Levantine Arabic''': '''ajp_Arab''',
    '''Akan''': '''aka_Latn''',
    '''Amharic''': '''amh_Ethi''',
    '''North Levantine Arabic''': '''apc_Arab''',
    '''Modern Standard Arabic''': '''arb_Arab''',
    '''Modern Standard Arabic Romanized''': '''arb_Latn''',
    '''Najdi Arabic''': '''ars_Arab''',
    '''Moroccan Arabic''': '''ary_Arab''',
    '''Egyptian Arabic''': '''arz_Arab''',
    '''Assamese''': '''asm_Beng''',
    '''Asturian''': '''ast_Latn''',
    '''Awadhi''': '''awa_Deva''',
    '''Central Aymara''': '''ayr_Latn''',
    '''South Azerbaijani''': '''azb_Arab''',
    '''North Azerbaijani''': '''azj_Latn''',
    '''Bashkir''': '''bak_Cyrl''',
    '''Bambara''': '''bam_Latn''',
    '''Balinese''': '''ban_Latn''',
    '''Belarusian''': '''bel_Cyrl''',
    '''Bemba''': '''bem_Latn''',
    '''Bengali''': '''ben_Beng''',
    '''Bhojpuri''': '''bho_Deva''',
    '''Banjar Arabic''': '''bjn_Arab''',
    '''Banjar Latin''': '''bjn_Latn''',
    '''Standard Tibetan''': '''bod_Tibt''',
    '''Bosnian''': '''bos_Latn''',
    '''Buginese''': '''bug_Latn''',
    '''Bulgarian''': '''bul_Cyrl''',
    '''Catalan''': '''cat_Latn''',
    '''Cebuano''': '''ceb_Latn''',
    '''Czech''': '''ces_Latn''',
    '''Chokwe''': '''cjk_Latn''',
    '''Central Kurdish''': '''ckb_Arab''',
    '''Crimean Tatar''': '''crh_Latn''',
    '''Welsh''': '''cym_Latn''',
    '''Danish''': '''dan_Latn''',
    '''German''': '''deu_Latn''',
    '''Southwestern Dinka''': '''dik_Latn''',
    '''Dyula''': '''dyu_Latn''',
    '''Dzongkha''': '''dzo_Tibt''',
    '''Greek''': '''ell_Grek''',
    '''English''': '''eng_Latn''',
    '''Esperanto''': '''epo_Latn''',
    '''Estonian''': '''est_Latn''',
    '''Basque''': '''eus_Latn''',
    '''Ewe''': '''ewe_Latn''',
    '''Faroese''': '''fao_Latn''',
    '''Fijian''': '''fij_Latn''',
    '''Finnish''': '''fin_Latn''',
    '''Fon''': '''fon_Latn''',
    '''French''': '''fra_Latn''',
    '''Friulian''': '''fur_Latn''',
    '''Nigerian Fulfulde''': '''fuv_Latn''',
    '''Scottish Gaelic''': '''gla_Latn''',
    '''Irish''': '''gle_Latn''',
    '''Galician''': '''glg_Latn''',
    '''Guarani''': '''grn_Latn''',
    '''Gujarati''': '''guj_Gujr''',
    '''Haitian Creole''': '''hat_Latn''',
    '''Hausa''': '''hau_Latn''',
    '''Hebrew''': '''heb_Hebr''',
    '''Hindi''': '''hin_Deva''',
    '''Chhattisgarhi''': '''hne_Deva''',
    '''Croatian''': '''hrv_Latn''',
    '''Hungarian''': '''hun_Latn''',
    '''Armenian''': '''hye_Armn''',
    '''Igbo''': '''ibo_Latn''',
    '''Ilocano''': '''ilo_Latn''',
    '''Indonesian''': '''ind_Latn''',
    '''Icelandic''': '''isl_Latn''',
    '''Italian''': '''ita_Latn''',
    '''Javanese''': '''jav_Latn''',
    '''Japanese''': '''jpn_Jpan''',
    '''Kabyle''': '''kab_Latn''',
    '''Jingpho''': '''kac_Latn''',
    '''Kamba''': '''kam_Latn''',
    '''Kannada''': '''kan_Knda''',
    '''Kashmiri Arabic''': '''kas_Arab''',
    '''Kashmiri Devanagari''': '''kas_Deva''',
    '''Georgian''': '''kat_Geor''',
    '''Central Kanuri Arabic''': '''knc_Arab''',
    '''Central Kanuri Latin''': '''knc_Latn''',
    '''Kazakh''': '''kaz_Cyrl''',
    '''Kabiyè''': '''kbp_Latn''',
    '''Kabuverdianu''': '''kea_Latn''',
    '''Khmer''': '''khm_Khmr''',
    '''Kikuyu''': '''kik_Latn''',
    '''Kinyarwanda''': '''kin_Latn''',
    '''Kyrgyz''': '''kir_Cyrl''',
    '''Kimbundu''': '''kmb_Latn''',
    '''Northern Kurdish''': '''kmr_Latn''',
    '''Kikongo''': '''kon_Latn''',
    '''Korean''': '''kor_Hang''',
    '''Lao''': '''lao_Laoo''',
    '''Ligurian''': '''lij_Latn''',
    '''Limburgish''': '''lim_Latn''',
    '''Lingala''': '''lin_Latn''',
    '''Lithuanian''': '''lit_Latn''',
    '''Lombard''': '''lmo_Latn''',
    '''Latgalian''': '''ltg_Latn''',
    '''Luxembourgish''': '''ltz_Latn''',
    '''Luba-Kasai''': '''lua_Latn''',
    '''Ganda''': '''lug_Latn''',
    '''Luo''': '''luo_Latn''',
    '''Mizo''': '''lus_Latn''',
    '''Standard Latvian''': '''lvs_Latn''',
    '''Magahi''': '''mag_Deva''',
    '''Maithili''': '''mai_Deva''',
    '''Malayalam''': '''mal_Mlym''',
    '''Marathi''': '''mar_Deva''',
    # NOTE(review): key below carries a trailing space — kept verbatim because user
    # lookups are keyed on the exact string; confirm against the upstream table.
    '''Minangkabau Arabic ''': '''min_Arab''',
    '''Minangkabau Latin''': '''min_Latn''',
    '''Macedonian''': '''mkd_Cyrl''',
    '''Plateau Malagasy''': '''plt_Latn''',
    '''Maltese''': '''mlt_Latn''',
    '''Meitei Bengali''': '''mni_Beng''',
    '''Halh Mongolian''': '''khk_Cyrl''',
    '''Mossi''': '''mos_Latn''',
    '''Maori''': '''mri_Latn''',
    '''Burmese''': '''mya_Mymr''',
    '''Dutch''': '''nld_Latn''',
    '''Norwegian Nynorsk''': '''nno_Latn''',
    '''Norwegian Bokmål''': '''nob_Latn''',
    '''Nepali''': '''npi_Deva''',
    '''Northern Sotho''': '''nso_Latn''',
    '''Nuer''': '''nus_Latn''',
    '''Nyanja''': '''nya_Latn''',
    '''Occitan''': '''oci_Latn''',
    '''West Central Oromo''': '''gaz_Latn''',
    '''Odia''': '''ory_Orya''',
    '''Pangasinan''': '''pag_Latn''',
    '''Eastern Panjabi''': '''pan_Guru''',
    '''Papiamento''': '''pap_Latn''',
    '''Western Persian''': '''pes_Arab''',
    '''Polish''': '''pol_Latn''',
    '''Portuguese''': '''por_Latn''',
    '''Dari''': '''prs_Arab''',
    '''Southern Pashto''': '''pbt_Arab''',
    '''Ayacucho Quechua''': '''quy_Latn''',
    '''Romanian''': '''ron_Latn''',
    '''Rundi''': '''run_Latn''',
    '''Russian''': '''rus_Cyrl''',
    '''Sango''': '''sag_Latn''',
    '''Sanskrit''': '''san_Deva''',
    '''Santali''': '''sat_Olck''',
    '''Sicilian''': '''scn_Latn''',
    '''Shan''': '''shn_Mymr''',
    '''Sinhala''': '''sin_Sinh''',
    '''Slovak''': '''slk_Latn''',
    '''Slovenian''': '''slv_Latn''',
    '''Samoan''': '''smo_Latn''',
    '''Shona''': '''sna_Latn''',
    '''Sindhi''': '''snd_Arab''',
    '''Somali''': '''som_Latn''',
    '''Southern Sotho''': '''sot_Latn''',
    '''Spanish''': '''spa_Latn''',
    '''Tosk Albanian''': '''als_Latn''',
    '''Sardinian''': '''srd_Latn''',
    '''Serbian''': '''srp_Cyrl''',
    '''Swati''': '''ssw_Latn''',
    '''Sundanese''': '''sun_Latn''',
    '''Swedish''': '''swe_Latn''',
    '''Swahili''': '''swh_Latn''',
    '''Silesian''': '''szl_Latn''',
    '''Tamil''': '''tam_Taml''',
    '''Tatar''': '''tat_Cyrl''',
    '''Telugu''': '''tel_Telu''',
    '''Tajik''': '''tgk_Cyrl''',
    '''Tagalog''': '''tgl_Latn''',
    '''Thai''': '''tha_Thai''',
    '''Tigrinya''': '''tir_Ethi''',
    '''Tamasheq Latin''': '''taq_Latn''',
    '''Tamasheq Tifinagh''': '''taq_Tfng''',
    '''Tok Pisin''': '''tpi_Latn''',
    '''Tswana''': '''tsn_Latn''',
    '''Tsonga''': '''tso_Latn''',
    '''Turkmen''': '''tuk_Latn''',
    '''Tumbuka''': '''tum_Latn''',
    '''Turkish''': '''tur_Latn''',
    '''Twi''': '''twi_Latn''',
    '''Central Atlas Tamazight''': '''tzm_Tfng''',
    '''Uyghur''': '''uig_Arab''',
    '''Ukrainian''': '''ukr_Cyrl''',
    '''Umbundu''': '''umb_Latn''',
    '''Urdu''': '''urd_Arab''',
    '''Northern Uzbek''': '''uzn_Latn''',
    '''Venetian''': '''vec_Latn''',
    '''Vietnamese''': '''vie_Latn''',
    '''Waray''': '''war_Latn''',
    '''Wolof''': '''wol_Latn''',
    '''Xhosa''': '''xho_Latn''',
    '''Eastern Yiddish''': '''ydd_Hebr''',
    '''Yoruba''': '''yor_Latn''',
    '''Yue Chinese''': '''yue_Hant''',
    '''Chinese Simplified''': '''zho_Hans''',
    '''Chinese Traditional''': '''zho_Hant''',
    '''Standard Malay''': '''zsm_Latn''',
    '''Zulu''': '''zul_Latn''',
}
class lowercase_ (PipelineTool):
    """
    Agent tool that translates text between NLLB-200 languages.

    Fixes vs. the mangled original: the base class is the imported ``PipelineTool``
    (the original inherited an undefined name), ``encode`` regains its three distinct
    parameters (the original repeated one name — a SyntaxError — while the body read
    ``src_lang``/``tgt_lang``), ``decode`` regains the ``outputs`` parameter its body
    reads, and the three pipeline hooks get their distinct ``encode``/``forward``/
    ``decode`` names instead of shadowing each other.
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    # NOTE(review): "ouput" typo kept verbatim — this string is runtime prompt text.
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Tokenize *text*, mapping the plain-English language names to NLLB codes.

        Raises ValueError when either language is not in ``lang_to_code``.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(F"{src_lang} is not a supported language." )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"{tgt_lang} is not a supported language." )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='''pt''', src_lang=src_lang, tgt_lang=tgt_lang )

    def forward(self, inputs):
        """Run generation on the tokenized inputs produced by ``encode``."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to text, dropping special tokens."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 624 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Restored from the mangled original, whose signature repeated one parameter
    name (a SyntaxError) while the CLI below calls
    ``convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path)``.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint to load.
        config_file: JSON config describing the T5 architecture.
        pytorch_dump_path: directory where the converted model is saved.
    """
    # Initialise the PyTorch model from the architecture description.
    config = TaConfig.from_json_file(config_file)
    print(F"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: three required path arguments feed the converter above.
    # Restored the `parser`/`args` locals — the original assigned both to a mangled
    # throwaway name and then read the real names (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 624 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer resources.  Restored names: the original assigned all
# four values to the same mangled name (each shadowing the previous), while the
# tokenizer class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
logger = logging.get_logger(__name__)
# Standard file names bundled with a GPT-2 checkpoint.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub URLs for each supported checkpoint's vocab/merges/tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
        '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
        '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
        '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
        '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
        '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
        '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
        '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
        '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
        '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
        '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
        '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
        '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
    },
}
# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''gpt2''': 1024,
    '''gpt2-medium''': 1024,
    '''gpt2-large''': 1024,
    '''gpt2-xl''': 1024,
    '''distilgpt2''': 1024,
}
class lowercase_ (PreTrainedTokenizerFast):
    """
    Fast (Rust-backed) GPT-2 BPE tokenizer.

    Fixes vs. the mangled original: the base class is the imported
    ``PreTrainedTokenizerFast`` (the original inherited an undefined name);
    ``__init__`` and the ``*args, **kwargs`` methods regain distinct parameter
    names (each signature repeated one mangled name — SyntaxErrors); the four
    methods get their real override names instead of shadowing each other; and
    the instance-attribute writes (``self.add_prefix_space`` etc.) the other
    methods read are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # NOTE(review): the original's default for this pop was mangled; False is
        # assumed (opt-in behavior) — confirm against the slow tokenizer.
        self.add_bos_token = kwargs.pop('''add_bos_token''', False)
        # If the serialized pre-tokenizer disagrees on add_prefix_space, rebuild it
        # with the requested setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type''') )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pretokenized input only works when a prefix space is prepended.
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Same guard as _batch_encode_plus, for the single-sequence path.
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab/merges files to *save_directory* and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into input ids, appending EOS after each turn and
        keeping only the trailing ``model_max_length`` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id] )
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 624 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _A ( args ):
    """Factory for the `download` subcommand: build the command object from parsed CLI args.

    Restored parameter name: the body reads ``args`` but the mangled signature
    named the parameter ``A__`` (NameError as written).
    """
    # NOTE(review): `DownloadCommand` is expected at module level — the command
    # class defined below is the intended target; confirm its name matches.
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    """
    ``transformers download`` CLI command: pre-downloads a model and its tokenizer
    into the local cache.

    Fixes vs. the mangled original: the class is named ``DownloadCommand`` (the
    factory above constructs that name), the base class is the imported
    ``BaseTransformersCLICommand`` (the original inherited an undefined name), and
    ``__init__`` regains its four distinct parameters (the original repeated one
    mangled name — a SyntaxError — while the body read model/cache/force/
    trust_remote_code) plus the ``self._*`` attribute writes that ``run`` reads.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `download` subparser and its arguments on the root parser."""
        download_parser = parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''', type=str, default=None, help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' )
        download_parser.add_argument('''model''', type=str, help='''Name of the model to download''' )
        # `_A` (defined above) is the factory that turns parsed args into this command.
        download_parser.set_defaults(func=_A)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download model and tokenizer with the stored options (lazy import keeps
        CLI startup cheap)."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
| 624 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """
    Builds a tiny random RoBERTa config plus fixture inputs for the Flax tests.

    Fixes vs. the mangled original: the class is named ``FlaxRobertaModelTester``
    (the test class below constructs that name); ``__init__`` regains its distinct
    parameters (the original repeated one mangled name — a SyntaxError — while the
    body read each real name) and the ``self.*`` attribute writes the other
    methods read.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) fixtures."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): mangled in the original; encoder mode assumed here —
            # the decoder variant is switched on in ..._for_decoder below. Confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same fixtures, with the inputs packed into the common dict format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Fixtures for cross-attention tests: decoder config + encoder outputs."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        # NOTE(review): the original's mangled `= True` assignment is assumed to be
        # the decoder switch on the config — confirm.
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class lowercase_ (FlaxModelTesterMixin, unittest.TestCase):
    """
    Flax RoBERTa model test suite: runs the shared mixin checks over all Flax
    RoBERTa heads and a slow from-PyTorch pretrained-loading smoke test.

    Fixes vs. the mangled original: the mixin base is the imported
    ``FlaxModelTesterMixin`` (the original inherited an undefined name), the two
    methods get their real ``setUp``/``test_...`` names instead of shadowing each
    other, and the unbound mangled locals are restored.
    """

    # NOTE(review): attribute name assumed from the mixin's conventions — confirm.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Each head class loads the PyTorch `roberta-base` weights and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''roberta-base''', from_pt=True)
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs)
| 624 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the TF tokenizer tests.  Restored names: the original
# assigned both values to the same mangled name, while setUp below reads
# TOKENIZER_CHECKPOINTS.  NOTE(review): the second constant's name is assumed
# (tiny model used by ModelToSave) — confirm.
TOKENIZER_CHECKPOINTS = ['''gpt2''']
TINY_MODEL_CHECKPOINT = '''gpt2'''
if is_tf_available():

    class ModelToSave(tf.Module):
        """Minimal tf.Module pairing the in-graph tokenizer with a GPT-2 LM head so
        the tokenize-then-generate graph can be exported as a SavedModel.

        Fixes vs. the mangled original: the class is named ``ModelToSave`` (the
        saved-model test below constructs that name), ``__init__``'s parameter is
        ``tokenizer`` (the body read that name), and the nonexistent ``tf.intaa``
        dtype is replaced.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # NOTE(review): the original's argument here was mangled; loading the
            # tiny checkpoint's config (module constant) is assumed — confirm.
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='''text'''),))
        def serving(self, text):
            """Tokenize raw strings and return the LM logits."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['''input_ids'''].to_tensor()
            # Attention mask: 1 for real tokens, 0 for ragged-fill padding.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['''logits''']
            return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase):
    """
    Tests that the in-graph ``TFGPTaTokenizer`` matches the slow Python tokenizer
    and survives tf.function compilation, SavedModel round-trips, config
    round-trips, and padding/truncation.

    Fixes vs. the mangled original: methods get distinct ``setUp``/``test_...``
    names instead of shadowing each other, unbound mangled locals are restored
    from their later references, and the nonexistent ``tf.intaa`` dtype is fixed.
    """

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1] ) )

    def test_output_equivalence(self):
        """Python tokenizer and in-graph tokenizer agree on every test sentence."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='''tf''' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape) )
                    # NOTE(review): original used the mangled "tf.intaa"; int64 is
                    # assumed to match the in-graph tokenizer's dtype — confirm.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values) )

    @slow
    def test_graph_mode(self):
        """tf.function-compiled tokenizer matches eager execution."""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]) )

    @slow
    def test_saved_model(self):
        """Tokenizer+model graph survives a SavedModel export/load round-trip."""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / '''saved.model'''
                tf.saved_model.save(model, save_path, signatures={'''serving_default''': model.serving} )
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['''serving_default'''](test_inputs)['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output) )

    @slow
    def test_from_config(self):
        """get_config/from_config round-trip preserves tokenizer behavior."""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]) )

    @slow
    def test_padding(self):
        """max_length padding/truncation yields exactly max_length tokens."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            # NOTE(review): original assigned 123123 to a mangled local; setting the
            # tokenizer's pad token id is the only use that makes padding work — confirm.
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 624 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir, dest_dir, n):
    """Copy each file in *src_dir* into *dest_dir*, keeping only its first *n*
    lines (trailing whitespace stripped, joined with "\\n").

    Restored from the mangled original: the signature repeated one parameter name
    (a SyntaxError) while the body read ``src_dir``/``dest_dir``/``n``, and the
    ``fire.Fire(minify)`` entry point below grounds the function name.  File
    handles are now closed via context managers instead of leaking.
    """
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        with path.open() as src_file:
            new = [x.rstrip() for x in src_file.readlines()][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        with dest_path.open('''w''') as dest_file:
            dest_file.write('''\n'''.join(new))
if __name__ == "__main__":
    # CLI entry point via python-fire: exposes the minifier's (src_dir, dest_dir, n)
    # as command-line arguments.
    # NOTE(review): `minify` must exist at module level — the function above (whose
    # name was mangled to `_A`) is the intended target; confirm its name matches.
    fire.Fire(minify)
| 624 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# Vertices of the initial equilateral triangle (closed by repeating the first
# vertex).  Restored names: the original assigned all four values to the same
# mangled name, while the list below and the __main__ block read
# VECTOR_1..VECTOR_3 / INITIAL_VECTORS.
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_660_254])  # (1/2, sqrt(3)/2): apex of the unit triangle
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors, steps):
    """Apply *steps* Koch-snowflake subdivision passes to *initial_vectors* and
    return the resulting list of 2-D numpy vectors.

    Restored from the mangled original: the signature repeated one parameter name
    (a SyntaxError) while the body read ``initial_vectors``, and the __main__
    block calls ``iterate`` / the loop body calls ``iteration_step``.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors):
    """Perform one Koch subdivision pass: each segment is replaced by four
    segments, with the middle third folded outward by a 60-degree rotation.

    Restored from the mangled original: this function's name is grounded by the
    call in ``iterate``, and the first ``append`` argument (mangled to the whole
    parameter) is the segment's start vector.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        # Point one third along the segment.
        new_vectors.append(start_vector + difference_vector / 3 )
        # Peak of the outward-pointing equilateral bump.
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60 ) )
        # Point two thirds along the segment.
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate(vector, angle_in_degrees):
    """Rotate a 2-D *vector* counter-clockwise by *angle_in_degrees* and return
    the rotated numpy array.

    Restored from the mangled original: the signature repeated one parameter
    name (a SyntaxError); the standard rotation matrix ((c, -s), (s, c)) is
    applied via a matrix-vector product.
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix, vector)
def plot(vectors):
    """Draw the polyline through *vectors* with equal axis scaling and show it.

    Restored from the mangled original: the function name is grounded by the
    call in the __main__ block below.
    """
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    # Run the module's doctests, then render a 5-iteration Koch snowflake.
    # Restored local: the original assigned the iterate() result to a mangled
    # name and then read `processed_vectors` (NameError as written).
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 624 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase__ : Tuple ,lowercase__ : List[str]=1_3 ,lowercase__ : str=7 ,lowercase__ : Optional[Any]=False ,lowercase__ : List[str]=True ,lowercase__ : Any=False ,lowercase__ : List[str]=True ,lowercase__ : Union[str, Any]=3_3 ,lowercase__ : str=3_2 ,lowercase__ : Any=5 ,lowercase__ : str=4 ,lowercase__ : List[Any]=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : str=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : List[str]=5_1_2 ,lowercase__ : Tuple=1_6 ,lowercase__ : Dict=2 ,lowercase__ : Dict=0.0_2 ,lowercase__ : Optional[Any]=3 ,lowercase__ : List[str]=4 ,lowercase__ : Tuple=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : Any ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = EsmModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : str ,lowercase__ : Any ,lowercase__ : Tuple ):
__lowercase = EsmForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
    """Run ``EsmForTokenClassification`` with labels and check the logits shape.

    Fixes two garbles: the six parameters all shared the name ``lowercase__``
    (duplicate-argument SyntaxError), and ``self.num_labels`` was assigned to
    a throwaway local instead of the config, so the head size never matched.
    """
    config.num_labels = self.num_labels
    model = EsmForTokenClassification(config=config)
    # NOTE(review): device target garbled in the original; assuming the
    # module-level ``torch_device`` used by transformers tests — confirm.
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common( self ):
    """Adapt ``prepare_config_and_inputs`` to the ``(config, inputs_dict)``
    shape the common ModelTesterMixin tests expect.

    Fixes the original, which unpacked all six tuple elements into the same
    throwaway ``__lowercase`` name and then read the never-bound
    ``input_ids``/``input_mask`` — a NameError at runtime.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
    return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): every class attribute below is bound to the same name
    # (SCREAMING_SNAKE_CASE) and every method too, so Python keeps only the
    # LAST binding of each; the earlier attributes/tests are silently
    # discarded. Several bodies also assign results to a throwaway
    # ``__lowercase`` local and then read names (``lowercase__``, ``model``,
    # ``position_ids``, ...) that are never bound. This looks like a
    # mechanical rename gone wrong — the distinct original names
    # (all_model_classes, pipeline_model_mapping, setUp, test_config, ...)
    # must be restored before this suite can run. TODO confirm upstream.

    # presumably test_mismatched_shapes — TODO confirm
    SCREAMING_SNAKE_CASE : Tuple = False
    # All ESM model classes exercised by the common model tests.
    SCREAMING_SNAKE_CASE : Union[str, Any] = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # No generative model classes for ESM.
    SCREAMING_SNAKE_CASE : Any = ()
    # Pipeline tasks mapped to the ESM class that implements each of them.
    SCREAMING_SNAKE_CASE : List[Any] = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : int = True

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Builds the shared testers (presumably the original setUp).
        # NOTE(review): results are dropped into a local instead of
        # self.model_tester / self.config_tester, and ``config_class=lowercase__``
        # references an undefined name — later tests reading self.model_tester
        # will fail until the attribute assignments are restored.
        __lowercase = EsmModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # Runs the common config checks via the shared ConfigTester.
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : int ):
        # Common forward-pass shape check.
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Re-runs the model check for each position-embedding variant.
        # NOTE(review): ``__lowercase = type`` discards the value — the
        # original presumably set config_and_inputs[0].position_embedding_type.
        # ``type`` also shadows the builtin.
        __lowercase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowercase = type
            self.model_tester.create_and_check_model(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Masked-LM head shape check.
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Token-classification head shape check.
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase__ )

    @slow
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = EsmModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Verifies create_position_ids_from_input_ids offsets positions by
        # padding_idx + 1 and maps padding tokens to padding_idx itself.
        __lowercase = self.model_tester.prepare_config_and_inputs()[0]
        __lowercase = EsmEmbeddings(config=lowercase__ )
        __lowercase = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
        __lowercase = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        __lowercase = create_position_ids_from_input_ids(lowercase__ ,model.padding_idx )
        self.assertEqual(position_ids.shape ,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(lowercase__ ,lowercase__ ) ) )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Verifies position ids derived from inputs_embeds (the no-input_ids
        # path) are sequential starting at padding_idx + 1.
        __lowercase = self.model_tester.prepare_config_and_inputs()[0]
        __lowercase = EsmEmbeddings(config=lowercase__ )
        __lowercase = torch.empty(2 ,4 ,3_0 )
        __lowercase = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        __lowercase = torch.as_tensor([expected_single_positions, expected_single_positions] )
        __lowercase = embeddings.create_position_ids_from_inputs_embeds(lowercase__ )
        self.assertEqual(position_ids.shape ,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(lowercase__ ,lowercase__ ) ) )

    @unittest.skip('''Esm does not support embedding resizing''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        pass

    @unittest.skip('''Esm does not support embedding resizing''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass
@require_torch
class lowercase_ (lowerCamelCase__ ):
    """simple docstring"""

    # NOTE(review): both methods bind the same name, so only the second
    # survives class creation; inside each, the model is assigned to a
    # throwaway ``__lowercase`` local while later lines read never-bound
    # names (``model``, ``output``, ``vocab_size``, ``lowercase__``). These
    # are mechanical-rename artifacts — restore distinct test_* names and
    # the local bindings before running.

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Integration check: masked-LM logits of facebook/esm2_t6_8M_UR50D on
        # a tiny input, compared against precomputed reference values.
        with torch.no_grad():
            __lowercase = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
            model.eval()
            __lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            __lowercase = model(lowercase__ )[0]
            __lowercase = 3_3
            __lowercase = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape ,lowercase__ )
            __lowercase = torch.tensor(
                [[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] ,lowercase__ ,atol=1e-4 ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Integration check: base-model hidden states on a short token
        # sequence, comparing a slice against precomputed reference values.
        with torch.no_grad():
            __lowercase = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
            model.eval()
            __lowercase = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
            __lowercase = model(lowercase__ )[0]
            # compare the actual values for a slice.
            __lowercase = torch.tensor(
                [[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] ,lowercase__ ,atol=1e-4 ) )
| 624 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy import structure: keys are submodule names, values the public symbols
# each submodule exports. Fixes the original, which bound this dict (and the
# per-backend symbol lists) to a throwaway name while _LazyModule below read
# the never-defined ``_import_structure`` — a NameError on import.
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}

# PyTorch models are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vit_mae'''] = [
        '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMAEForPreTraining''',
        '''ViTMAELayer''',
        '''ViTMAEModel''',
        '''ViTMAEPreTrainedModel''',
    ]

# TensorFlow models are only exported when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
        '''TFViTMAEForPreTraining''',
        '''TFViTMAEModel''',
        '''TFViTMAEPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Resource files used by the tokenizer. The class below references these
# exact names (vocab_files_names / save_vocabulary / error logging); the
# original bound every one of them to the same throwaway ``lowerCAmelCase__``
# name, leaving them undefined at their use sites.
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}

# Maximum input length associated with each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/rembert''': 256,
}
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-based tokenizer (RemBERT-style).

    Fixes the garbled original: ``__init__`` and most multi-argument methods
    declared every parameter as ``lowercase__`` (a duplicate-argument
    SyntaxError), all methods shared the name ``SCREAMING_SNAKE_CASE`` (so
    only the last survived), and the three class attributes shadowed each
    other. Parameter and method names are restored from the names the bodies
    actually reference and from the PreTrainedTokenizer contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the underlying sentencepiece model.
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The sentencepiece processor wraps a C++ object that cannot be
        # pickled; drop it and rebuild from vocab_file on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Split ``text`` into sentencepiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a piece (str) to its vocabulary id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id to its piece (str)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of pieces back into a single string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Format: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 624 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# Restored constant names: ``main`` below reads OUTPUT_DIR and NUMBER_IMAGES
# by name, but the original bound every constant to the same throwaway
# ``lowerCAmelCase__`` name, leaving them all undefined at use sites.
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # boxes with width/height below this fraction are dropped
LABEL_DIR = ''''''  # directory holding the YOLO-format *.txt label files
IMG_DIR = ''''''  # directory holding the matching *.jpg images
OUTPUT_DIR = ''''''  # where mosaic images and labels are written
NUMBER_IMAGES = 250  # number of mosaic images to generate
def main():
    """Generate NUMBER_IMAGES mosaic images plus YOLO-format label files.

    Renamed from the garbled ``_A``: the ``__main__`` guard calls ``main()``.
    Each iteration samples 4 images, stitches them into one mosaic, writes
    ``<name>_MOSAIC_<code>.jpg`` and a matching ``.txt`` with the remapped
    boxes converted back to center/size coordinates.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('''.''', 1)[0]
        file_root = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(F"{file_root}.jpg", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            # Convert corner boxes back to YOLO center/size format.
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(F"{file_root}.txt", '''w''') as outfile:
            outfile.write('''\n'''.join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Collect image paths and YOLO-format annotations.

    Fixes the garbled original, whose two parameters both carried the name
    ``A__`` (a duplicate-argument SyntaxError) and whose name ``_A`` did not
    match the ``get_dataset`` call site in ``main``.

    Each ``<name>.txt`` in ``label_dir`` holds lines of
    ``class x_center y_center width height`` (relative coordinates); the
    matching image is assumed to be ``<name>.jpg`` in ``img_dir``.

    Args:
        label_dir: directory containing the ``*.txt`` label files.
        img_dir: directory containing the corresponding ``*.jpg`` images.

    Returns:
        ``(img_paths, labels)`` where each labels entry is a list of
        ``[class_id, xmin, ymin, xmax, ymax]`` corner boxes (still relative).
        Images whose label file holds no objects are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '''*.txt''')):
        label_name = label_file.split(os.sep)[-1].rsplit('''.''', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, F"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''').split(''' ''')
            # center/size -> corner coordinates
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list,
    all_annos,
    idxs,
    output_size,
    scale_range,
    filter_scale=0.0,
):
    """Stitch 4 images into one mosaic and remap their annotations.

    Fixes the garbled original: six parameters all shared the name ``A__``
    (a duplicate-argument SyntaxError), the quadrant blits were assigned to a
    throwaway local instead of the output canvas, and the canvas dtype was
    the nonexistent ``np.uinta`` (uint8). Names are restored from the call
    site in ``main``.

    Args:
        all_img_list: list of image file paths.
        all_annos: per-image lists of ``[class, xmin, ymin, xmax, ymax]``
            boxes in relative coordinates.
        idxs: four indices selecting the images to combine.
        output_size: (height, width) of the mosaic canvas.
        scale_range: (min, max) of the random split-point scale.
        filter_scale: drop boxes whose width or height falls below this.

    Returns:
        ``(output_img, new_anno, first_path)``: the mosaic image, the
        remapped annotation list, and the path of the first source image.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    # Pixel coordinates of the random split point.
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char):
    """Generate a random string of lowercase letters and digits.

    Renamed from the garbled ``_A``: ``main`` calls ``random_chars(32)``.

    Args:
        number_char: desired length; must be greater than 1.

    Returns:
        A random string such as ``'7b7ad245cdff75241935e4dd860f3bad'``.

    Raises:
        ValueError: if ``number_char`` is not greater than 1.
    """
    # Use an explicit exception instead of ``assert`` so the check survives
    # ``python -O`` (asserts are stripped under optimization).
    if number_char <= 1:
        raise ValueError("The number of character should greater than 1")
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
# Script entry point: build the mosaic dataset, then report completion.
if __name__ == "__main__":
    main()
    print('''DONE ✅''')
| 624 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ (lowerCamelCase__ ):
    """Processor bundling a BridgeTower image processor with a Roberta
    tokenizer behind a single ``__call__``.

    Fixes the garbled original: ``__init__`` and ``__call__`` declared every
    parameter as ``lowercase__`` (a duplicate-argument SyntaxError) and the
    three class attributes / forwarding methods all shared one name, so only
    the last survived. Names are restored from the ProcessorMixin contract.
    """

    # ProcessorMixin contract: component attribute names and their classes.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images``; merge the results
        into a single BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        # NOTE(review): the two flag values were garbled in the original;
        # True/True matches the upstream BridgeTower processor — confirm.
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, de-duplicated, order kept.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 624 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Resource files used by the tokenizer. The class below references these
# exact names (vocab_files_names / save_vocabulary / error logging); the
# original bound every one of them to the same throwaway ``lowerCAmelCase__``
# name, leaving them undefined at their use sites.
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}

# Maximum input length associated with each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/rembert''': 256,
}
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-based tokenizer (RemBERT-style).

    Fixes the garbled original: ``__init__`` and most multi-argument methods
    declared every parameter as ``lowercase__`` (a duplicate-argument
    SyntaxError), all methods shared the name ``SCREAMING_SNAKE_CASE`` (so
    only the last survived), and the three class attributes shadowed each
    other. Parameter and method names are restored from the names the bodies
    actually reference and from the PreTrainedTokenizer contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the underlying sentencepiece model.
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The sentencepiece processor wraps a C++ object that cannot be
        # pickled; drop it and rebuild from vocab_file on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Split ``text`` into sentencepiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a piece (str) to its vocabulary id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id to its piece (str)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of pieces back into a single string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Format: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 624 | 1 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
__lowercase = 0
for ch in input_str:
__lowercase = ord(A__ )
__lowercase = pow(2 , A__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit (Project Euler 72:
    the number of reduced proper fractions with denominator <= limit).

    Renamed from the garbled ``_A``: the ``__main__`` guard below calls
    ``solution()``. Also replaces the floating-point product form
    ``phi[n] *= 1 - 1/p`` with an exact integer sieve
    (``phi[n] -= phi[n] // p``), removing rounding risk at large limits.

    >-style checks: solution(8) == 21, solution(10) == 31.
    """
    # phi[n] starts as n; after sieving over its prime factors it holds
    # Euler's totient of n.
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p untouched by any smaller prime => p is prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return sum(phi[2:])


if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure: keys are submodule names, values the public symbols
# each submodule exports. Fixes the original, which bound this dict (and the
# model symbol list) to a throwaway name while _LazyModule below read the
# never-defined ``_import_structure`` — a NameError on import.
_import_structure = {
    '''configuration_xmod''': [
        '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XmodConfig''',
        '''XmodOnnxConfig''',
    ],
}

# PyTorch models are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_xmod'''] = [
        '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XmodForCausalLM''',
        '''XmodForMaskedLM''',
        '''XmodForMultipleChoice''',
        '''XmodForQuestionAnswering''',
        '''XmodForSequenceClassification''',
        '''XmodForTokenClassification''',
        '''XmodModel''',
        '''XmodPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase_ (lowerCamelCase__ ):
    """Reusable harness exercising the common PretrainedConfig behaviours
    (property round-trips, JSON/file serialization, save/load, num_labels)
    for one concrete config class.

    Fixes the garbled original: ``__init__`` declared every parameter as
    ``lowercase__`` (a duplicate-argument SyntaxError), every method shared
    the name ``SCREAMING_SNAKE_CASE`` (so only the last survived), and the
    constructor dropped its arguments into a throwaway local instead of
    ``self``. Method names are restored from the calls inside
    ``run_common_tests``; attribute names from the reads in the bodies.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent  # the unittest.TestCase driving the assertions
        self.config_class = config_class  # config class under test
        self.has_text_modality = has_text_modality  # text models must expose vocab_size
        self.inputs_dict = kwargs  # kwargs used to build test configs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Standard properties must exist and be settable via attribute and __init__."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=F"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=F"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=F"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """to_json_string must round-trip every constructor kwarg."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """to_json_file / from_json_file must round-trip the full config."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''config.json''')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """save_pretrained / from_pretrained must round-trip the full config."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """from_pretrained must honour the ``subfolder`` argument."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = '''test'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_directory = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_directory)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """num_labels must resize the label maps at init and on assignment."""
        # NOTE(review): the original text read the garbled attribute names
        # ``idalabel``/``labelaid`` and discarded the ``3`` into a local;
        # id2label/label2id and ``config.num_labels = 3`` match the standard
        # PretrainedConfig API — confirm against the config class used.
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """Non-composite configs must be constructible with no arguments."""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every key in config_common_kwargs must be honoured by __init__."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # NOTE(review): the original compared against the garbled
                    # name ``torch.floataa``; float16 is the conventional
                    # value here — confirm against config_common_kwargs.
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = '''\n'''.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        """Run every common config check in sequence."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 624 | 1 |
'''simple docstring'''
def compute_ap(graph):
    """Print the articulation points of an undirected graph.

    Renamed from the garbled ``_A``: the call site below invokes
    ``compute_ap(data)``. Also fixes the inner ``dfs``, whose four
    parameters all shared the name ``A__`` — a duplicate-argument
    SyntaxError; names are restored from how each is used in the body.

    Args:
        graph: adjacency list mapping each vertex 0..n-1 to its neighbours.

    Side effect: prints each articulation-point vertex on its own line,
    in increasing order.
    """
    n = len(graph)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Count tree edges leaving the DFS root (root is an AP iff > 1).
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # The DFS root is an articulation point only with >1 tree edges.
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
# Restored the name ``data``: the call below reads it, but the original
# bound the dict to a throwaway ``lowerCAmelCase__`` name (NameError).
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 624 |
'''simple docstring'''
import re
def _A ( A__ ):
"""simple docstring"""
__lowercase = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A__ , A__ ) )
if __name__ == "__main__":
    # Demo: validate a sample number and print the result.
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 624 | 1 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCAmelCase__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCAmelCase__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCAmelCase__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    """`datasets` metric wrapping `scipy.stats.spearmanr`."""

    def _info(self):
        # Declare the metric schema; `datasets.Metric` requires this hook.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                }
            ),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # spearmanr returns (correlation, pvalue); expose the p-value on request.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 624 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix supporting +, -, *, transpose and the
    Sherman-Morrison rank-one inverse update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = F"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Width of the widest element, used to right-align every cell.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = F"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = '''['''
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True when *loc* is an in-range (row, column) pair.
        (Name kept as in the original public API, typo included.)"""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> float:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add elementwise
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Matrix:
        """Given A^-1 (self) and column vectors u, v, return (A + u v^T)^-1,
        or None when the rank-one update makes the matrix singular."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


lowercase_ = Matrix  # backward-compatible alias for the original (garbled) class name
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on a small example."""
        # a^(-1): start from the identity matrix.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(F"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F"u is {u}")
        print(F"v is {v}")
        print(F"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
| 624 | 1 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    """Builds tiny GPTNeoXJapanese configs/inputs and runs shape checks for the test class below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        # NOTE(review): restored from the upstream test; assumes the decoder
        # variant toggles cross attention here — confirm against transformers.
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past['''hidden_states'''][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['''hidden_states'''][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the GPTNeoXJapanese models."""

    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = '''abeja/gpt-neox-japanese-2.7b'''
        prompts = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
        EXPECTED_OUTPUTS = [
            '''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
            '''100年後に必要とされる会社は、「人」が中心の会社です。''',
            '''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
            '''国境の長いトンネルを抜けると、そこは雪国だった。''',
            '''美味しい日本食といえば、やっぱりお寿司ですよね。''',
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors='''pt''').input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 624 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Project Euler 114: count the ways a row of *length* units can be
    filled with red blocks at least three units long, any two blocks
    separated by at least one black square (the empty row counts as one way).
    """
    # ways_number[i] holds the count for a row of length i; lengths 0-2 admit
    # only the empty filling.
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # Place the block at block_start; the remaining suffix (after a
                # mandatory separating black square) can be filled recursively.
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The block flush against the right edge (no suffix).
            ways_number[row_length] += 1

    return ways_number[length]


_A = solution  # backward-compatible alias for the original (garbled) name
if __name__ == "__main__":
    # Print the tiling count for the default 50-unit row (Project Euler 114).
    print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Root of the examples tree whose files carry a `check_min_version(...)` pin.
PATH_TO_EXAMPLES = '''examples/'''
# For each file kind: (regex locating the version string, replacement template
# in which "VERSION" is substituted with the target version).
REPLACE_PATTERNS = {
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
# Files updated on every release, keyed by the pattern kind above.
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname, version, pattern):
    """Rewrite *fname* so its version string (located by REPLACE_PATTERNS[pattern])
    becomes *version*."""
    with open(fname, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''', version)
    code = re_pattern.sub(replace, code)
    with open(fname, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned minimum version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''')
        if "legacy" in directories:
            directories.remove('''legacy''')
        for fname in fnames:
            if fname.endswith('''.py'''):
                update_version_in_file(os.path.join(folder, fname), version, pattern='''examples''')
def global_version_update(version, patch=False):
    """Update the version everywhere it appears; patch releases skip the examples."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """In the README model list, point doc links at the stable docs instead of `main`."""
    # If these prompts change in the README, update them here as well.
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('''1.'''):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''',
                '''https://huggingface.co/docs/transformers/model_doc''',
            )
        index += 1

    with open(README_FILE, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.writelines(lines)
def get_version():
    """Read the current version out of the package `__init__`."""
    with open(REPLACE_FILES['''init'''], '''r''') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps: bump the version and clean the README."""
    # Default target: base version when on a dev release, otherwise bump minor/micro.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = F"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(F"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(F"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps: move to the next dev version."""
    current_version = get_version()
    dev_version = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(F"Updating version to {version}.")
    global_version_update(version)
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 624 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = '''Hello world! cécé herlolip'''

# Immutable record of every hyper-parameter the authors' BertAbs model expects.
BertAbsConfig = namedtuple(
    '''BertAbsConfig''',
    [
        '''temp_dir''',
        '''large''',
        '''use_bert_emb''',
        '''finetune_bert''',
        '''encoder''',
        '''share_emb''',
        '''max_pos''',
        '''enc_layers''',
        '''enc_hidden_size''',
        '''enc_heads''',
        '''enc_ff_size''',
        '''enc_dropout''',
        '''dec_layers''',
        '''dec_hidden_size''',
        '''dec_heads''',
        '''dec_ff_size''',
        '''dec_dropout''',
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Load the authors' BertAbs checkpoint, copy its weights into the
    Transformers implementation, verify both produce identical outputs,
    and save the converted state dict."""
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir='''.''', finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder='''bert''', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device('''cpu'''), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device('''cpu'''))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''')
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''')
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')

    # prepare the model inputs (pad both sequences to the 512-token max length)
    encoder_input_ids = tokenizer.encode('''This is sample éàalj\'-.''')
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode('''This is sample 3 éàalj\'-.''')
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''')
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''')

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''')
    torch.save(
        new_model.state_dict(), '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--bertabs_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch dump.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the output PyTorch model.''',
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 624 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = ['pixel_values']
def __init__( self : List[str] ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : bool = True ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : bool = True ,**lowercase__ : str ,):
super().__init__(**lowercase__ )
__lowercase = size if size is not None else {'''shortest_edge''': 2_2_4}
__lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__ )
__lowercase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__ ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Union[str, Any] ,):
__lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__lowercase = get_resize_output_image_size(lowercase__ ,size=size['''shortest_edge'''] ,default_to_square=lowercase__ )
return resize(lowercase__ ,size=lowercase__ ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[str] ,):
__lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowercase__ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : np.ndarray ,lowercase__ : Union[int, float] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : int ,):
return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Union[float, List[float]] ,lowercase__ : Union[float, List[float]] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : str ,):
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : ImageInput ,lowercase__ : bool = None ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = None ,lowercase__ : bool = None ,lowercase__ : int = None ,lowercase__ : bool = None ,lowercase__ : float = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : Optional[ChannelDimension] = ChannelDimension.FIRST ,**lowercase__ : str ,):
    # Full preprocessing pipeline: resolve per-call options against the
    # instance defaults, validate them, then optionally convert to RGB,
    # resize, center-crop, rescale and normalize, and pack the result into a
    # BatchFeature under "pixel_values".
    # NOTE(review): all parameters share the name `lowercase__` (duplicate
    # argument names are a SyntaxError) while the body reads the real names
    # (`do_resize`, `size`, `images`, ...) — machine-mangled; the original
    # CLIP-style parameter names must be restored before this can run.
    __lowercase = do_resize if do_resize is not None else self.do_resize
    __lowercase = size if size is not None else self.size
    __lowercase = get_size_dict(lowercase__ ,param_name='''size''' ,default_to_square=lowercase__ )
    __lowercase = resample if resample is not None else self.resample
    __lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
    __lowercase = crop_size if crop_size is not None else self.crop_size
    __lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''' ,default_to_square=lowercase__ )
    __lowercase = do_rescale if do_rescale is not None else self.do_rescale
    __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
    __lowercase = do_normalize if do_normalize is not None else self.do_normalize
    __lowercase = image_mean if image_mean is not None else self.image_mean
    __lowercase = image_std if image_std is not None else self.image_std
    __lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    __lowercase = make_list_of_images(lowercase__ )
    if not valid_images(lowercase__ ):
        raise ValueError(
            '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
            '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
    # Each enabled step must have its required option resolved.
    if do_resize and size is None:
        raise ValueError('''Size must be specified if do_resize is True.''' )
    if do_center_crop and crop_size is None:
        raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
    if do_rescale and rescale_factor is None:
        raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        __lowercase = [convert_to_rgb(lowercase__ ) for image in images]
    # All transformations expect numpy arrays.
    __lowercase = [to_numpy_array(lowercase__ ) for image in images]
    if do_resize:
        __lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__ ) for image in images]
    if do_center_crop:
        __lowercase = [self.center_crop(image=lowercase__ ,size=lowercase__ ) for image in images]
    if do_rescale:
        __lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__ ) for image in images]
    if do_normalize:
        __lowercase = [self.normalize(image=lowercase__ ,mean=lowercase__ ,std=lowercase__ ) for image in images]
    # Reorder channels as requested (channels-first by default) and wrap.
    __lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__ ) for image in images]
    __lowercase = {'''pixel_values''': images}
    return BatchFeature(data=lowercase__ ,tensor_type=lowercase__ )
| 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Heavy optional dependencies: torch only when installed; PIL only for the
# type checks in the pipeline output assertions below.
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    class lowercase_ :
        """Fallback stand-in used when Pillow is not installed.

        NOTE(review): the stub is named `lowercase_` and its method
        `SCREAMING_SNAKE_CASE`, so it does not actually shadow `Image.open`;
        the original presumably defined `class Image` with a static `open`.
        """
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *lowercase__ : Union[str, Any] ,**lowercase__ : Tuple ):
            pass
def _A ( A__ ):
"""simple docstring"""
__lowercase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase_ (unittest.TestCase ):
    """Pipeline tests for depth-estimation models.

    NOTE(review): this class is machine-mangled — every test method is named
    `SCREAMING_SNAKE_CASE` and locals are assigned to `__lowercase` but read
    under their original names (e.g. `depth_estimator`, `outputs`), so it
    cannot run as-is.
    """

    # Model classes this pipeline test covers.
    SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : int ):
        # Build the pipeline under test plus two sample image inputs.
        __lowercase = DepthEstimationPipeline(model=lowercase__ ,image_processor=lowercase__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ):
        # Smoke-test output structure on a single image, then on a mixed batch
        # of a PIL image, a URL and dataset files (RGBA/LA/L modes).
        __lowercase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,lowercase__ )
        import datasets
        __lowercase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        __lowercase = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,lowercase__ ,)

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # End-to-end regression test against Intel/dpt-large reference values.
        __lowercase = '''Intel/dpt-large'''
        __lowercase = pipeline('''depth-estimation''' ,model=lowercase__ )
        __lowercase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        __lowercase = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.6_6_2 )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 624 | 1 |
'''simple docstring'''
import numpy as np
def _A ( A__ , A__ , A__ = 1e-12 , A__ = 100 , ):
"""simple docstring"""
assert np.shape(A__ )[0] == np.shape(A__ )[1]
# Ensure proper dimensionality.
assert np.shape(A__ )[0] == np.shape(A__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(A__ ) == np.iscomplexobj(A__ )
__lowercase = np.iscomplexobj(A__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(A__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__lowercase = False
__lowercase = 0
__lowercase = 0
__lowercase = 1e12
while not convergence:
# Multiple matrix by the vector.
__lowercase = np.dot(A__ , A__ )
# Normalize the resulting output vector.
__lowercase = w / np.linalg.norm(A__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__lowercase = vector.conj().T if is_complex else vector.T
__lowercase = np.dot(A__ , np.dot(A__ , A__ ) )
# Check convergence.
__lowercase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__lowercase = True
__lowercase = lambda_
if is_complex:
__lowercase = np.real(lambda_ )
return lambda_, vector
def _A ( ):
    """Self-test comparing power iteration against numpy.linalg.eigh on a
    fixed real symmetric matrix and a derived Hermitian complex matrix.

    NOTE(review): machine-mangled — it calls `power_iteration` (the function
    above was renamed `_A`, which this definition now shadows), passes the
    unbound name `A__`, assigns results to throwaway `__lowercase` names, and
    uses `np.complexaaa` (presumably `np.complex128`). It cannot run as-is.
    """
    __lowercase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    __lowercase = np.array([41, 4, 20] )
    __lowercase = real_input_matrix.astype(np.complexaaa )
    __lowercase = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    __lowercase = np.array([41, 4, 20] ).astype(np.complexaaa )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            __lowercase = real_input_matrix
            __lowercase = real_vector
        elif problem_type == "complex":
            __lowercase = complex_input_matrix
            __lowercase = complex_vector
        # Our implementation.
        __lowercase , __lowercase = power_iteration(A__ , A__ )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        __lowercase , __lowercase = np.linalg.eigh(A__ )
        # Last eigenvalue is the maximum one.
        __lowercase = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        __lowercase = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(A__ ) - np.abs(A__ ) ) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `test_power_iteration` is not defined in this module (the
    # test function above was renamed `_A`), so this call raises NameError.
    test_power_iteration()
| 624 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(A__ ):
__lowercase = y[k] + step_size * ode_func(A__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest
    doctest.testmod()
| 624 | 1 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the T5 architecture.
        pytorch_dump_path: directory where the PyTorch model is saved.
    """
    # Fix: the original declared all three parameters as `A__` (duplicate
    # argument names are a SyntaxError) and its f-strings read the unbound
    # names `config` / `pytorch_dump_path`; real names restored to match the
    # CLI arguments passed positionally below.
    config = TaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    # NOTE(review): machine-mangled — the parser is assigned to
    # `lowerCAmelCase__` but used as `parser`/`args`, and
    # `convert_tf_checkpoint_to_pytorch` is not defined in this module (the
    # function above was renamed `_A`).
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 624 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
__lowercase = sum(A__ ) / len(A__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(A__ )
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest
    doctest.testmod()
| 624 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Input/output locations for the flip augmentation; the last constant selects
# the cv2.flip axis.
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _A ( ):
    """Flip every dataset image (and its YOLO annotations) and write the
    results with a random filename suffix.

    NOTE(review): machine-mangled — it calls `get_dataset`,
    `update_image_and_anno` and `random_chars` (all renamed `_A` below),
    reads unbound names (`paths`, `new_annos`, `file_name`, ...), and the
    output paths start with `/` (filesystem root). It cannot run as-is.
    """
    __lowercase , __lowercase = get_dataset(A__ , A__ )
    print('''Processing...''' )
    __lowercase , __lowercase , __lowercase = update_image_and_anno(A__ , A__ , A__ )
    for index, image in enumerate(A__ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __lowercase = random_chars(32 )
        __lowercase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __lowercase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(F"/{file_root}.jpg" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Success {index+1}/{len(A__ )} with {file_name}" )
        __lowercase = []
        for anno in new_annos[index]:
            __lowercase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(A__ )
        with open(F"/{file_root}.txt" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = []
for label_file in glob.glob(os.path.join(A__ , '''*.txt''' ) ):
__lowercase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(A__ ) as in_file:
__lowercase = in_file.readlines()
__lowercase = os.path.join(A__ , F"{label_name}.jpg" )
__lowercase = []
for obj_list in obj_lists:
__lowercase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def _A ( img_list , anno_list , flip_type = 1 ):
    """Flip each image and mirror its YOLO boxes along the matching axis.

    Args:
        img_list: list of image file paths.
        anno_list: per-image lists of [class_id, x_c, y_c, w, h] boxes.
        flip_type: cv2.flip axis — 1 flips horizontally (mirrors x_center),
            0 flips vertically (mirrors y_center).

    Returns:
        (new_imgs_list, new_annos_lists, path_list): flipped images, their
        adjusted annotations, and the corresponding source paths.
    """
    # Fix: all three parameters were declared `A__` (duplicate argument names
    # are a SyntaxError) while the body read img_list/anno_list/flip_type;
    # local names restored to the ones the loop body uses.
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Horizontal flip mirrors the x centre about the image midline.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Vertical flip mirrors the y centre about the image midline.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def _A ( A__ = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__lowercase = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module (the entry point was
    # renamed `_A`), so this call raises NameError before printing.
    main()
    print('''DONE ✅''')
| 624 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCAmelCase__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCAmelCase__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCAmelCase__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
    """Spearman rank-order correlation metric.

    Computes the Spearman correlation (and optionally its p-value) between
    float predictions and references; see the module-level docstrings that
    are attached via the decorator above.
    """

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Metric metadata: float predictions/references, scipy reference URL.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                } ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)

    def SCREAMING_SNAKE_CASE ( self : Tuple ,predictions ,references ,return_pvalue = False ):
        # Fix: the original declared all three parameters as `lowercase__`
        # (duplicate argument names are a SyntaxError) while reading
        # `return_pvalue`; real parameter names restored.
        results = spearmanr(predictions ,references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 624 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase_ (lowerCamelCase__ ):
    """MobileNetV1 model configuration (model type below)."""

    SCREAMING_SNAKE_CASE : List[Any] = 'mobilenet_v1'

    def __init__( self ,num_channels=3 ,image_size=2_2_4 ,depth_multiplier=1.0 ,min_depth=8 ,hidden_act="relu6" ,tf_padding=True ,classifier_dropout_prob=0.9_9_9 ,initializer_range=0.0_2 ,layer_norm_eps=0.0_0_1 ,**kwargs ,):
        """Create a MobileNetV1 config; each argument maps 1:1 onto the
        attribute of the same name set below. `depth_multiplier` scales the
        per-layer channel counts and must be strictly positive.
        """
        # Fix: the original declared every parameter as `lowercase__`
        # (duplicate argument names are a SyntaxError) while the body read the
        # real names, and the assignments had lost their `self.` targets;
        # both restored (defaults kept in the original order).
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class lowercase_ (lowerCamelCase__ ):
    """ONNX export configuration for MobileNetV1.

    NOTE(review): the three properties below were all renamed to the same
    identifier, so only the last remains accessible on the class; in the
    original they were presumably `inputs`, `outputs` and
    `atol_for_validation`.
    """

    # Minimum torch/opset version this export configuration targets.
    SCREAMING_SNAKE_CASE : Dict = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # Single pixel_values input with a dynamic batch axis.
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )

    @property
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Output names depend on the task the model is exported for.
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 624 |
'''simple docstring'''
import random
from typing import Any
def _A ( A__ ):
"""simple docstring"""
for _ in range(len(A__ ) ):
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase = random.randint(0 , len(A__ ) - 1 )
__lowercase , __lowercase = data[b], data[a]
return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings.
    # NOTE(review): machine-mangled — both lists are assigned to
    # `lowerCAmelCase__` yet read as `integers`/`strings`, and
    # `fisher_yates_shuffle` is not defined (the function above is `_A`),
    # so these prints raise NameError.
    lowerCAmelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
    lowerCAmelCase__ = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 624 | 1 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase_ :
    """Ray-actor-side holder that lazily constructs a RagRetriever.

    NOTE: the three methods below were machine-renamed to the same identifier
    `SCREAMING_SNAKE_CASE` (so later defs shadow earlier ones); only parameter
    names and `self.` attribute targets are repaired here, keeping the visible
    method names to avoid widening the change.
    """

    def __init__( self : Dict ):
        # Becomes True once the retriever has been constructed.
        self.initialized = False

    def SCREAMING_SNAKE_CASE ( self : str ,config ,question_encoder_tokenizer ,generator_tokenizer ,index ):
        # Build the retriever exactly once; index initialization is deferred
        # to the init method below (hence init_retrieval=False).
        # Fix: the original declared all four parameters as `lowercase__`
        # (duplicate argument names are a SyntaxError) and the assignments
        # had lost their `self.` targets.
        if not self.initialized:
            self.retriever = RagRetriever(
                config ,question_encoder_tokenizer=question_encoder_tokenizer ,generator_tokenizer=generator_tokenizer ,index=index ,init_retrieval=False ,)
            self.initialized = True

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Load/initialize the retrieval index inside this worker process.
        self.retriever.index.init_index()

    def SCREAMING_SNAKE_CASE ( self : Tuple ,question_hidden_states ,n_docs ):
        # Delegate to the underlying retriever's main retrieval routine.
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states ,n_docs )
        return doc_ids, retrieved_doc_embeds
class lowercase_ (lowerCamelCase__ ):
    """Distributed retriever that fans retrieval out to Ray actor workers.

    NOTE(review): machine-mangled — every method's parameters are named
    `lowercase__` (duplicate argument names are a SyntaxError), assignments
    target throwaway `__lowercase` names yet are read under original names
    (`retrieval_workers`, `random_worker`, ...), and the worker calls below
    reference attributes (`create_rag_retriever`, `init_retrieval`,
    `retrieve`) that the mangled worker class above no longer exposes.
    """

    def __init__( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any]=None ):
        # Reject an eagerly initialized index when workers are used: with Ray,
        # dataset and index are loaded separately inside the workers.
        if index is not None and index.is_initialized() and len(lowercase__ ) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
        super().__init__(
            lowercase__ ,question_encoder_tokenizer=lowercase__ ,generator_tokenizer=lowercase__ ,index=lowercase__ ,init_retrieval=lowercase__ ,)
        __lowercase = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            # Eagerly create the retriever on every worker actor.
            ray.get(
                [
                    worker.create_rag_retriever.remote(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
                    for worker in self.retrieval_workers
                ] )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Initialize retrieval remotely on the workers, or locally when no
        # workers are configured.
        logger.info('''initializing retrieval''' )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ):
        # Route a retrieval request to a random worker (cheap load balancing),
        # or perform it in-process when there are no workers.
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            __lowercase = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )]
            __lowercase , __lowercase = ray.get(random_worker.retrieve.remote(lowercase__ ,lowercase__ ) )
        else:
            __lowercase , __lowercase = self._main_retrieve(lowercase__ ,lowercase__ )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase__ )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Optional[int] ,lowercase__ : Any ,lowercase__ : int=None ,**lowercase__ : List[Any] ):
        # Delegate tokenizer loading to the parent class.
        return super(lowercase__ ,cls ).get_tokenizers(lowercase__ ,lowercase__ ,**lowercase__ )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,lowercase__ : int ,lowercase__ : int ,lowercase__ : Any=None ,**lowercase__ : Optional[int] ):
        # Build a retriever from a pretrained RAG checkpoint, optionally with
        # a user-provided indexed dataset (-> custom index).
        __lowercase = kwargs.pop('''config''' ,lowercase__ ) or RagConfig.from_pretrained(lowercase__ ,**lowercase__ )
        __lowercase = RagTokenizer.from_pretrained(lowercase__ ,config=lowercase__ )
        __lowercase = rag_tokenizer.question_encoder
        __lowercase = rag_tokenizer.generator
        if indexed_dataset is not None:
            __lowercase = '''custom'''
            __lowercase = CustomHFIndex(config.retrieval_vector_size ,lowercase__ )
        else:
            __lowercase = cls._build_index(lowercase__ )
        return cls(
            lowercase__ ,question_encoder_tokenizer=lowercase__ ,generator_tokenizer=lowercase__ ,retrieval_workers=lowercase__ ,index=lowercase__ ,)
| 624 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Which parts of the checkpoint to rewrite: config keys only, key renames
# only, or weight tensors only.
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = False
if __name__ == "__main__":
    # Convert an old-style diffusers UNet repo (config + weights) to the
    # current naming scheme.
    # NOTE(review): machine-mangled — every assignment targets
    # `lowerCAmelCase__` while later lines read the original names (`parser`,
    # `args`, `config`, `text`, `model`, `state_dict`, ...), so this script
    # cannot run as-is.
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--repo_path''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    lowerCAmelCase__ = parser.parse_args()
    # Old config key -> new diffusers config key.
    lowerCAmelCase__ = {
        '''image_size''': '''sample_size''',
        '''num_res_blocks''': '''layers_per_block''',
        '''block_channels''': '''block_out_channels''',
        '''down_blocks''': '''down_block_types''',
        '''up_blocks''': '''up_block_types''',
        '''downscale_freq_shift''': '''freq_shift''',
        '''resnet_num_groups''': '''norm_num_groups''',
        '''resnet_act_fn''': '''act_fn''',
        '''resnet_eps''': '''norm_eps''',
        '''num_head_channels''': '''attention_head_dim''',
    }
    # Old state-dict key prefix -> new module name.
    lowerCAmelCase__ = {
        '''time_steps''': '''time_proj''',
        '''mid''': '''mid_block''',
        '''downsample_blocks''': '''down_blocks''',
        '''upsample_blocks''': '''up_blocks''',
    }
    # Configs may live at the repo root or under an `unet/` subfolder.
    lowerCAmelCase__ = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        lowerCAmelCase__ = reader.read()
    lowerCAmelCase__ = json.loads(text)
    if do_only_config:
        # Strip old keys so only the renamed config remains.
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, '''config.json'''):
        lowerCAmelCase__ = UNetaDModel(**config)
    else:
        # Text-to-image checkpoints use the conditional UNet variant.
        lowerCAmelCase__ = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
        lowerCAmelCase__ = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    lowerCAmelCase__ = dict(model.config)
    if do_only_renaming:
        # Move values from old config keys to their new names, and strip the
        # legacy "UNetRes" prefix from block type names.
        for key, value in config_parameters_to_change.items():
            if key in config:
                lowerCAmelCase__ = config[key]
                del config[key]
        lowerCAmelCase__ = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
        lowerCAmelCase__ = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
    if do_only_weights:
        # Rewrite state-dict keys (dropping obsolete `.op.*` entries) and
        # re-save the model with the converted weights.
        lowerCAmelCase__ = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        lowerCAmelCase__ = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
                continue
            lowerCAmelCase__ = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('''.''')[0] == key:
                    lowerCAmelCase__ = param_value
                    lowerCAmelCase__ = True
            if not has_changed:
                lowerCAmelCase__ = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 624 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 624 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ (lowerCamelCase__ ):
    """ConfigTester subclass adding Cvt-specific attribute checks."""

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Fix: the original assigned the freshly built config to a throwaway
        # `__lowercase` and then called hasattr on the unbound name
        # `lowercase__`; bind the config to a real local instead.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,'''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config ,'''num_heads''' ) )
class lowercase_ :
    """Helper that builds tiny Cvt configs/inputs and checks model outputs.

    NOTE(review): machine-mangled — the `__init__` parameters are all
    `lowercase__` (duplicate argument names are a SyntaxError), assignments
    target `__lowercase` instead of `self.<attr>`, and cross-method calls
    such as `self.get_config()` / `self.prepare_config_and_inputs()`
    reference names that no longer exist (every method was renamed
    `SCREAMING_SNAKE_CASE`). It cannot run as-is.
    """

    def __init__( self : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int]=1_3 ,lowercase__ : List[Any]=6_4 ,lowercase__ : Optional[int]=3 ,lowercase__ : Dict=[1_6, 4_8, 9_6] ,lowercase__ : Optional[Any]=[1, 3, 6] ,lowercase__ : Tuple=[1, 2, 1_0] ,lowercase__ : Optional[int]=[7, 3, 3] ,lowercase__ : str=[4, 2, 2] ,lowercase__ : Dict=[2, 1, 1] ,lowercase__ : Tuple=[2, 2, 2] ,lowercase__ : Tuple=[False, False, True] ,lowercase__ : int=[0.0, 0.0, 0.0] ,lowercase__ : str=0.0_2 ,lowercase__ : Union[str, Any]=1e-1_2 ,lowercase__ : Optional[int]=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[Any]=2 ,):
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = patch_sizes
        __lowercase = patch_stride
        __lowercase = patch_padding
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = num_labels
        __lowercase = num_channels
        __lowercase = embed_dim
        __lowercase = num_heads
        __lowercase = stride_kv
        __lowercase = depth
        __lowercase = cls_token
        __lowercase = attention_drop_rate
        __lowercase = initializer_range
        __lowercase = layer_norm_eps

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Random pixel_values (and labels, when use_labels) plus a config.
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE ( self : str ):
        return CvtConfig(
            image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)

    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[str] ):
        # Forward pass through the bare CvtModel; the expected final feature
        # map size follows conv patch-embedding arithmetic applied per stage.
        __lowercase = CvtModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        __lowercase = (self.image_size, self.image_size)
        __lowercase , __lowercase = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            __lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            __lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )

    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Dict ):
        # Classification head: logits must be (batch_size, num_labels).
        __lowercase = self.num_labels
        __lowercase = CvtForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Repack config and inputs into the dict shape common tests expect.
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Model-level test suite for Cvt (ModelTester + PipelineTester mixins).

    NOTE(review): mangled module -- all test methods were renamed to the same
    ``SCREAMING_SNAKE_CASE`` identifier (later defs shadow earlier ones at
    class-creation time) and the class attributes likewise, so only the last
    of each name survives.  Kept byte-identical; restore the original names
    (test_config, test_model, ...) from the upstream file before running.
    """
    # Apparently: all_model_classes, pipeline_model_mapping, and the
    # test_* feature flags (pruning / resize_embeddings / head_masking ...).
    SCREAMING_SNAKE_CASE : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE : Optional[int] = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : Optional[int] = False
    SCREAMING_SNAKE_CASE : Union[str, Any] = False
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : Optional[Any] = False
    SCREAMING_SNAKE_CASE : str = False
    # setUp: build the model tester and a ConfigTester for CvtConfig.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = CvtModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
    # Runs the standard config round-trip checks.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def SCREAMING_SNAKE_CASE ( self : str ):
        return
    @unittest.skip(reason='''Cvt does not output attentions''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE ( self : str ):
        pass
    # Checks the forward signature starts with `pixel_values`.
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
    # Verifies hidden-state count (one per stage) and first-block shape.
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        def check_hidden_states_output(lowercase__ : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ):
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            with torch.no_grad():
                __lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
            __lowercase = outputs.hidden_states
            __lowercase = len(self.model_tester.depth )
            self.assertEqual(len(lowercase__ ) ,lowercase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,[
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] ,)
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase__ )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        pass
    # Smoke-tests loading the first pretrained checkpoint.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = CvtModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def _A ( ):
    """Load the standard COCO cats-on-couch fixture image used by the
    integration tests below."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
    """Slow integration test: run a pretrained Cvt classifier on a fixture
    image and compare the first logits against reference values.

    NOTE(review): mangled module -- local assignment targets became
    ``__lowercase`` while later lines reference the original names
    (``model``, ``inputs``, ``outputs``, ...), which are undefined here.
    Kept byte-identical.
    """
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Apparently ``default_image_processor``.
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase__ )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        # verify the logits
        __lowercase = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape ,lowercase__ )
        __lowercase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(lowercase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
| 624 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = list(A__ )
__lowercase = list(A__ )
__lowercase = 0
for i in range(len(A__ ) ):
if lista[i] != lista[i]:
count += 1
__lowercase = '''_'''
if count > 1:
return False
else:
return "".join(A__ )
def _A(binary):
    """
    Compute the prime implicants of the given list of binary-term strings by
    repeatedly merging pairs that differ in exactly one bit.

    Terms that merge with no partner in a round are prime implicants; the
    merged terms (deduplicated) feed the next round until nothing merges.

    Fixes over the mangled original: ``check1``/``temp``/``binary`` were
    undefined names, and merged pairs appended the literal string 'X'
    instead of the merged term ``k`` (with the condition inverted to
    ``k is False``), which discarded every merge result.
    """
    pi = []
    while True:
        check1 = ['''$'''] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # A successful merge marks both parents as non-prime and
                # keeps the merged term for the next round.
                if k is not False:
                    check1[i] = '''*'''
                    check1[j] = '''*'''
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
for minterm in minterms:
__lowercase = ''''''
for _ in range(A__ ):
__lowercase = str(minterm % 2 ) + string
minterm //= 2
temp.append(A__ )
return temp
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = list(A__ )
__lowercase = list(A__ )
__lowercase = 0
for i in range(len(A__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = [0] * len(A__ )
for i in range(len(chart[0] ) ):
__lowercase = 0
__lowercase = -1
for j in range(len(A__ ) ):
if chart[j][i] == 1:
count += 1
__lowercase = j
if count == 1:
__lowercase = 1
for i in range(len(A__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(A__ ) ):
__lowercase = 0
temp.append(prime_implicants[i] )
while True:
__lowercase = 0
__lowercase = -1
__lowercase = 0
for i in range(len(A__ ) ):
__lowercase = chart[i].count(1 )
if count_n > max_n:
__lowercase = count_n
__lowercase = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(A__ ) ):
__lowercase = 0
def _A(prime_implicants, binary):
    """
    Build the coverage chart: ``chart[i][j]`` is 1 iff prime implicant ``i``
    covers minterm ``binary[j]`` (i.e. the two strings differ in exactly as
    many positions as the implicant has '_' wildcards).

    Fixes over the mangled original: the two parameters shared one name (a
    SyntaxError) and ``chart``/``prime_implicants``/``binary`` were
    undefined names.
    """
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('''_''')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def _A():
    """
    Interactive driver for the Quine-McCluskey minimization: reads the
    variable count and the decimal minterms, then prints the prime
    implicants and the essential prime implicants.

    Fixes over the mangled original: the minterm comprehension referenced the
    undefined name ``A__`` (should be the loop variable ``x``) and parsed the
    values as ``float`` -- minterms must be integers for the binary
    conversion to produce valid bit strings -- and the intermediate results
    were all bound to one throwaway name.
    """
    no_of_variable = int(input('''Enter the no. of variables\n''' ) )
    minterms = [
        int(x)
        for x in input(
            '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('''Prime Implicants are:''' )
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('''Essential Prime Implicants are:''' )
    print(essential_prime_implicants)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is never defined in this (mangled) module -- the
    # driver above is named `_A` -- so this call would raise NameError.
    main()
| 624 |
'''simple docstring'''
def _A ( ):
"""simple docstring"""
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def _A ( A__ ):
"""simple docstring"""
__lowercase = 1
__lowercase = 2
while i * i <= n:
__lowercase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _A(limit=500):
    """
    Return the first triangle number with more than ``limit`` divisors
    (Project Euler problem 12; the original hard-coded 500 is kept as the
    default, so existing callers are unaffected).

    Fix over the mangled original: the generator expression called
    ``count_divisors(A__)`` where ``A__`` is undefined -- it must be the
    loop variable ``i``.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > limit )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this (mangled) module -- the
    # function above is named `_A` -- so this call would raise NameError.
    print(solution())
| 624 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of pretrained X-MOD checkpoints to their hosted config files.
# NOTE(review): this was presumably XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP before
# mangling; as written it clobbers the logger assigned just above -- confirm
# against the upstream transformers module.
lowerCAmelCase__ = {
    '''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
    '''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
    '''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
    '''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
    '''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
    '''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
    '''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
    '''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
    '''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ ):
    """
    Configuration for an X-MOD model (XLM-R with language-specific modular
    adapters).

    Fixes over the mangled original: the ``__init__`` signature repeated the
    parameter name ``lowercase__`` (a SyntaxError) and every value was
    assigned to a throwaway local instead of ``self``.  Parameter names and
    defaults reconstructed from the upstream ``XmodConfig``; the positional
    order and default values below match the original signature exactly.
    """
    SCREAMING_SNAKE_CASE : Union[str, Any] = 'xmod'

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-1_2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Transformer backbone hyper-parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD-specific adapter settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class lowercase_ (lowerCamelCase__ ):
    """
    ONNX export configuration for X-MOD.

    Fix over the mangled original: the axes dict was bound to a throwaway
    local while the ``return`` referenced the undefined name
    ``dynamic_axis``.
    """
    @property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 624 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase_ :
    """Builds a small FunnelConfig plus random inputs for the TF Funnel tests.

    NOTE(review): mangled module -- ``__init__`` repeats the parameter name
    ``lowercase__`` (a SyntaxError) and every assignment target became
    ``__lowercase`` while the right-hand sides kept the original attribute
    names (``parent``, ``batch_size``, ...), which are undefined here.  All
    ``create_and_check_*`` helpers were renamed to the same
    ``SCREAMING_SNAKE_CASE`` identifier.  Code kept byte-identical; confirm
    intent against the pristine upstream test file.
    """
    def __init__( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any]=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[Any]=True ,lowercase__ : str=True ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Tuple=9_9 ,lowercase__ : List[str]=[1, 1, 2] ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=3_2 ,lowercase__ : int=4 ,lowercase__ : Tuple=8 ,lowercase__ : Tuple=3_7 ,lowercase__ : str="gelu_new" ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[Any]=0.0 ,lowercase__ : Tuple=5_1_2 ,lowercase__ : Any=3 ,lowercase__ : List[str]=0.0_2 ,lowercase__ : List[Any]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : Tuple=False ,):
        # Mangled self-assignments; RHS names are the intended attributes.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_mask
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = block_sizes
        __lowercase = num_decoder_layers
        __lowercase = d_model
        __lowercase = n_head
        __lowercase = d_head
        __lowercase = d_inner
        __lowercase = hidden_act
        __lowercase = hidden_dropout
        __lowercase = attention_dropout
        __lowercase = activation_dropout
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = 2
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope
        __lowercase = initializer_std
        # Used in the tests to check the size of the first attention layer
        __lowercase = n_head
        # Used in the tests to check the size of the first hidden state
        __lowercase = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        __lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            __lowercase = self.num_hidden_layers + 2
    # Apparently ``prepare_config_and_inputs``.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        __lowercase = None
        if self.use_input_mask:
            __lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase = None
        if self.use_token_type_ids:
            __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            __lowercase = ids_tensor([self.batch_size] ,self.num_choices )
        __lowercase = FunnelConfig(
            vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    # Apparently ``create_and_check_model``: checks dict/list/keyword call
    # styles and the effect of truncating/separating cls token.
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,):
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        __lowercase = [input_ids, input_mask]
        __lowercase = model(lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
    # Apparently ``create_and_check_base_model``: base model pools to 2 (or 3)
    # positions depending on config flags.
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,):
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        __lowercase = [input_ids, input_mask]
        __lowercase = model(lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
        __lowercase = False
        __lowercase = TFFunnelBaseModel(config=lowercase__ )
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
    # Apparently ``create_and_check_for_pretraining``.
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : List[str] ,):
        __lowercase = TFFunnelForPreTraining(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
    # Apparently ``create_and_check_for_masked_lm``.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,):
        __lowercase = TFFunnelForMaskedLM(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    # Apparently ``create_and_check_for_sequence_classification``.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,):
        __lowercase = self.num_labels
        __lowercase = TFFunnelForSequenceClassification(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    # Apparently ``create_and_check_for_multiple_choice``: inputs are tiled
    # along a new `choice` axis.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,):
        __lowercase = self.num_choices
        __lowercase = TFFunnelForMultipleChoice(config=lowercase__ )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
        __lowercase = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    # Apparently ``create_and_check_for_token_classification``.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,):
        __lowercase = self.num_labels
        __lowercase = TFFunnelForTokenClassification(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    # Apparently ``create_and_check_for_question_answering``.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Any ,):
        __lowercase = TFFunnelForQuestionAnswering(config=lowercase__ )
        __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __lowercase = model(lowercase__ )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    # Apparently ``prepare_config_and_inputs_for_common``.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        __lowercase = self.prepare_config_and_inputs()
        (
            (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) ,
        ) = config_and_inputs
        __lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Test suite for the full (encoder+decoder) TF Funnel variants.

    NOTE(review): mangled module -- every test method shares the identifier
    ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones), as do the
    class attributes (apparently all_model_classes, pipeline_model_mapping,
    test_head_masking, test_onnx).  Kept byte-identical.
    """
    SCREAMING_SNAKE_CASE : int = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Any = False
    # setUp: build model tester and config tester.
    def SCREAMING_SNAKE_CASE ( self : str ):
        __lowercase = TFFunnelModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : str ):
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : int ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
    """Test suite for the encoder-only ("base") TF Funnel variants.

    NOTE(review): mangled module -- method and attribute names collapsed to
    single identifiers as in the class above.  Kept byte-identical.
    """
    SCREAMING_SNAKE_CASE : List[Any] = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    SCREAMING_SNAKE_CASE : Dict = False
    SCREAMING_SNAKE_CASE : List[str] = False
    # setUp: model tester in `base` mode (no decoder layers counted).
    def SCREAMING_SNAKE_CASE ( self : str ):
        __lowercase = TFFunnelModelTester(self ,base=lowercase__ )
        __lowercase = ConfigTester(self ,config_class=lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
| 624 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding (standard transformers __init__ pattern).
# Fixes over the mangled original: the import-structure dict and the two
# model lists were all assigned to a throwaway name while ``_LazyModule`` was
# called with the undefined name ``_import_structure``, and the lazy module
# was never installed into ``sys.modules``.
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch implementations are only importable when torch is installed.
    _import_structure['''modeling_vit_mae'''] = [
        '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMAEForPreTraining''',
        '''ViTMAELayer''',
        '''ViTMAEModel''',
        '''ViTMAEPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
        '''TFViTMAEForPreTraining''',
        '''TFViTMAEModel''',
        '''TFViTMAEPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys
    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
# Emit INFO-level progress messages during conversion.
logging.set_verbosity_info()
def _A(tf_checkpoint_path, config_file, pytorch_dump_path):
    """
    Convert a TensorFlow T5 checkpoint into a saved PyTorch model directory.

    Fixes over the mangled original: the three parameters shared one name (a
    SyntaxError) and the locals were bound to a throwaway name while later
    lines referenced ``config`` and ``model``.
    """
    # Build the model skeleton from the JSON configuration.
    config = TaConfig.from_json_file(config_file)
    print(F"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    # NOTE(review): mangled block -- the parser was bound to a throwaway name
    # while the `parser.add_argument` calls reference the undefined name
    # `parser`, and `convert_tf_checkpoint_to_pytorch`/`args` are undefined
    # (the converter above is named `_A`).  Would raise NameError at runtime.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 624 | 1 |
'''simple docstring'''
from collections import deque
def _A ( A__ ):
"""simple docstring"""
__lowercase = len(A__ )
__lowercase = deque()
__lowercase = [False for _ in range(A__ )]
__lowercase = [-1 for _ in range(A__ )]
__lowercase = index_of[:]
def strong_connect(A__ , A__ , A__ ):
__lowercase = index # the number when this node is seen
__lowercase = index # lowest rank node reachable from here
index += 1
stack.append(A__ )
__lowercase = True
for w in g[v]:
if index_of[w] == -1:
__lowercase = strong_connect(A__ , A__ , A__ )
__lowercase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowercase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowercase = []
__lowercase = stack.pop()
__lowercase = False
component.append(A__ )
while w != v:
__lowercase = stack.pop()
__lowercase = False
component.append(A__ )
components.append(A__ )
return index
__lowercase = []
for v in range(A__ ):
if index_of[v] == -1:
strong_connect(A__ , 0 , A__ )
return components
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = [[] for _ in range(A__ )]
for u, v in edges:
g[u].append(A__ )
return g
if __name__ == "__main__":
    # Test
    lowerCAmelCase__ = 7
    lowerCAmelCase__ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    lowerCAmelCase__ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    lowerCAmelCase__ = [(u, v) for u, v in zip(source, target)]
    lowerCAmelCase__ = create_graph(n_vertices, edges)
    # NOTE(review): mangled demo -- every result is bound to the same
    # throwaway name while later lines reference the original names
    # (`source`, `target`, `n_vertices`, `edges`, `g`), and
    # `create_graph`/`tarjan` are undefined (both functions above are `_A`).
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 624 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _A(A__):
    """
    Factory for the `download` sub-command: build a DownloadCommand from a
    parsed argparse namespace.

    Fix over the mangled original: the body referenced the undefined name
    ``args`` instead of the parameter.  NOTE(review): ``DownloadCommand`` is
    itself undefined in this mangled module (the class below is named
    ``lowercase_``) -- confirm against the upstream CLI module.
    """
    return DownloadCommand(A__.model, A__.cache_dir, A__.force, A__.trust_remote_code)
class lowercase_ (lowerCamelCase__ ):
    """
    CLI command that pre-downloads a model and its tokenizer into the cache.

    Fixes over the mangled original: the sub-parser returned by
    ``add_parser`` was bound to a throwaway local while later lines used the
    undefined name ``download_parser``; ``type``/``default`` of two
    arguments and ``set_defaults(func=...)`` pointed at the parser parameter
    instead of ``str``/``None``/the factory ``_A``; and ``__init__``
    repeated one parameter name (a SyntaxError) and stored nothing on
    ``self`` although the run method reads ``self._model`` etc.
    (Method names remain as mangled -- apparently ``register_subcommand``
    and ``run`` upstream.)
    """
    @staticmethod
    def SCREAMING_SNAKE_CASE ( lowercase__ : ArgumentParser ):
        # Register the `download` sub-command and its flags on the parser.
        download_parser = lowercase__.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''' ,type=str ,default=None ,help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''' ,action='''store_true''' ,help='''Force the model to be download even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''' ,action='''store_true''' ,help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' ,)
        download_parser.add_argument('''model''' ,type=str ,help='''Name of the model to download''' )
        # The factory `_A` turns parsed args into a command instance.
        download_parser.set_defaults(func=_A )
    def __init__( self : str ,model : str ,cache : str ,force : bool ,trust_remote_code : bool ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Import lazily so the CLI starts fast when this command is unused.
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
| 624 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer configuration.
# NOTE(review): every constant below is bound to the same name
# ``lowerCAmelCase__``, so each assignment clobbers the previous one — this
# looks like a renaming artifact (originally ``logger``, ``SPIECE_UNDERLINE``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``, etc.); confirm.
lowerCAmelCase__ = logging.get_logger(__name__)
# SentencePiece's whitespace marker character.
lowerCAmelCase__ = '''▁'''
# Expected on-disk filename for the SentencePiece model.
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
# Hub URLs for the published checkpoints' vocab files.
lowerCAmelCase__ = {
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}
# Maximum model input sizes (positional embedding limits).
lowerCAmelCase__ = {
    '''facebook/mbart-large-en-ro''': 1024,
    '''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
# The 25 language codes mBART-cc25 was trained with.
lowerCAmelCase__ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-based MBART tokenizer.

    Wraps a ``sentencepiece`` BPE model and re-maps its ids onto the original
    fairseq vocabulary layout: the four special tokens come first, the SP
    pieces are shifted by ``fairseq_offset``, and the 25 language codes plus
    ``<mask>`` are appended after the SP vocab.  Sequences are built with the
    language code as a *suffix*: ``X [eos, lang_code]``.

    NOTE(review): many bodies below bind results to a local ``__lowercase``
    where attribute assignments appear intended, and read names that differ
    from the declared parameters — artifacts of a renaming pass; verify
    against the attributes actually consumed (``self.sp_model``,
    ``self.fairseq_offset``, ``self.lang_code_to_id`` …).
    """
    # Class-level tokenizer configuration (vocab filenames, hub map, sizes,
    # model input names, and the prefix/suffix special-token id lists).
    SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : List[Any] = ['input_ids', 'attention_mask']
    SCREAMING_SNAKE_CASE : List[int] = []
    SCREAMING_SNAKE_CASE : List[int] = []
    def __init__( self : List[str] ,lowercase__ : int ,lowercase__ : Optional[Any]="<s>" ,lowercase__ : Optional[int]="</s>" ,lowercase__ : List[str]="</s>" ,lowercase__ : Union[str, Any]="<s>" ,lowercase__ : Dict="<unk>" ,lowercase__ : List[str]="<pad>" ,lowercase__ : str="<mask>" ,lowercase__ : Tuple=None ,lowercase__ : Optional[Any]=None ,lowercase__ : int=None ,lowercase__ : Optional[Dict[str, Any]] = None ,lowercase__ : str=None ,**lowercase__ : List[Any] ,):
        # Mask token behave like a normal word, i.e. include the space before it
        __lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else mask_token
        __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,cls_token=lowercase__ ,pad_token=lowercase__ ,mask_token=lowercase__ ,tokenizer_file=lowercase__ ,src_lang=lowercase__ ,tgt_lang=lowercase__ ,additional_special_tokens=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase__ ,)
        # Load the SentencePiece model from the vocab file on disk.
        __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowercase__ ) )
        __lowercase = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        __lowercase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __lowercase = 1
        __lowercase = len(self.sp_model )
        # Language codes get ids after the (offset) SP vocabulary.
        __lowercase = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
        }
        __lowercase = {v: k for k, v in self.lang_code_to_id.items()}
        # <mask> takes the very last id, after all language codes.
        __lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        __lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        __lowercase = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        # Default to English source if the caller did not specify one.
        __lowercase = src_lang if src_lang is not None else '''en_XX'''
        __lowercase = self.lang_code_to_id[self._src_lang]
        __lowercase = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self : Tuple ):
        # Pickle support: the SP processor is not picklable, so serialize its
        # proto bytes instead and drop the live object from the state dict.
        __lowercase = self.__dict__.copy()
        __lowercase = None
        __lowercase = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : List[str] ,lowercase__ : Optional[Any] ):
        # Restore state and rebuild the SP processor from the serialized proto.
        __lowercase = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            __lowercase = {}
        __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Total vocabulary size: SP pieces + language codes + offset + <mask>.
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Current source-language code (e.g. "en_XX").
        return self._src_lang
    @src_lang.setter
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : str ):
        # Changing the source language re-derives the suffix special tokens.
        __lowercase = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
        # Build a 0/1 mask flagging special (prefix/suffix) token positions.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
        __lowercase = [1] * len(self.prefix_tokens )
        __lowercase = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
        return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Wrap sequence(s) with the current prefix/suffix special-token ids.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # mBART uses no token-type ids: return an all-zero list of full length.
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Optional[str] ,lowercase__ : Optional[str] ,**lowercase__ : Any ):
        # Used by the translation pipeline: tokenize raw inputs and attach the
        # target-language id as the forced BOS token.
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        __lowercase = src_lang
        __lowercase = self(lowercase__ ,add_special_tokens=lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
        __lowercase = self.convert_tokens_to_ids(lowercase__ )
        __lowercase = tgt_lang_id
        return inputs
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Full token -> id mapping, including tokens added after training.
        __lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ):
        # Split text into SentencePiece string pieces.
        return self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ):
        # Token -> id: special/lang tokens first, then offset SP pieces.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __lowercase = self.sp_model.PieceToId(lowercase__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Dict ):
        # Id -> token: inverse of the mapping above.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : int ):
        # Join pieces back into text.
        # NOTE(review): ``.replace(lowercase__ ,' ')`` passes the token list
        # itself as the substring to replace — presumably this should be the
        # SentencePiece underline marker ('▁'); confirm against upstream.
        __lowercase = ''''''.join(lowercase__ ).replace(lowercase__ ,''' ''' ).strip()
        return out_string
    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
        # Persist the SentencePiece model into ``save_directory`` (copy the
        # original file when available, else re-serialize the loaded proto).
        if not os.path.isdir(lowercase__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        __lowercase = os.path.join(
            lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,lowercase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase__ ,'''wb''' ) as fi:
                __lowercase = self.sp_model.serialized_model_proto()
                fi.write(lowercase__ )
        return (out_vocab_file,)
    def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : List[str] ,lowercase__ : str = "en_XX" ,lowercase__ : Optional[List[str]] = None ,lowercase__ : str = "ro_RO" ,**lowercase__ : Union[str, Any] ,):
        # Convenience wrapper: set languages, then delegate batch preparation.
        __lowercase = src_lang
        __lowercase = tgt_lang
        return super().prepare_seqaseq_batch(lowercase__ ,lowercase__ ,**lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Switch special tokens to source-language mode (encoder inputs).
        return self.set_src_lang_special_tokens(self.src_lang )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # Switch special tokens to target-language mode (decoder labels).
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[Any] ):
        # Source mode: no prefix; suffix = [eos, src_lang_code].
        __lowercase = self.lang_code_to_id[src_lang]
        __lowercase = []
        __lowercase = [self.eos_token_id, self.cur_lang_code]
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ):
        # Target mode: no prefix; suffix = [eos, tgt_lang_code].
        __lowercase = self.lang_code_to_id[lang]
        __lowercase = []
        __lowercase = [self.eos_token_id, self.cur_lang_code]
| 624 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the tests below, and the model used for saving.
# NOTE(review): both constants bind the same name ``lowerCAmelCase__`` (the
# second clobbers the first) — presumably ``TOKENIZER_CHECKPOINTS`` and
# ``TINY_MODEL_CHECKPOINT`` originally; confirm.
lowerCAmelCase__ = ['''gpt2''']
lowerCAmelCase__ = '''gpt2'''
if is_tf_available():
    class lowercase_ (tf.Module ):
        """tf.Module bundling an in-graph tokenizer with a GPT-2 LM head so the
        pair can be exported via ``tf.saved_model`` with a string-input
        serving signature.

        NOTE(review): ``__lowercase = …`` lines look like mangled attribute
        assignments (``self.tokenizer``/``self.model``); confirm against the
        attributes ``serving`` reads.
        """
        def __init__( self : List[str] ,lowercase__ : Tuple ):
            super().__init__()
            __lowercase = tokenizer
            __lowercase = AutoConfig.from_pretrained(lowercase__ )
            __lowercase = TFGPTaLMHeadModel.from_config(lowercase__ )
        @tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='''text''' ),) )
        def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ):
            # Tokenize raw strings in-graph, densify the ragged ids, derive an
            # attention mask from non-zero positions, and return LM logits.
            __lowercase = self.tokenizer(lowercase__ )
            __lowercase = tokenized['''input_ids'''].to_tensor()
            __lowercase = tf.cast(input_ids_dense > 0 ,tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            __lowercase = self.model(input_ids=lowercase__ ,attention_mask=lowercase__ )['''logits''']
            return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase ):
    """Checks that the in-graph ``TFGPTaTokenizer`` matches the reference
    Python ``GPTaTokenizer``, and that it survives ``tf.function`` compilation,
    ``tf.saved_model`` round-trips, config round-trips, and ``max_length``
    truncation.

    NOTE(review): comprehensions and assignments below reference names
    (``lowercase__``, ``checkpoint``) inconsistently with their loop variables
    — renaming artifacts; confirm against the attributes the tests read
    (``self.tokenizers``, ``self.tf_tokenizers``, ``self.test_sentences``).
    """
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Build both tokenizer families plus a multilingual sentence fixture.
        super().setUp()
        __lowercase = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __lowercase = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        __lowercase = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        __lowercase = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # The TF tokenizer must produce identical ids/masks to the Python one.
        for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                __lowercase = tokenizer([test_inputs] ,return_tensors='''tf''' )
                __lowercase = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    __lowercase = python_outputs[key].numpy()
                    __lowercase = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(lowercase__ ,tf.intaa ) == tf_outputs_values ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Eager and tf.function-compiled outputs must agree.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = tf.function(lowercase__ )
            for test_inputs in self.test_sentences:
                __lowercase = tf.constant(lowercase__ )
                __lowercase = compiled_tokenizer(lowercase__ )
                __lowercase = tf_tokenizer(lowercase__ )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        # SavedModel export/reload must preserve the serving output.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = ModelToSave(tokenizer=lowercase__ )
            __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
            __lowercase = model.serving(lowercase__ )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                __lowercase = Path(lowercase__ ) / '''saved.model'''
                tf.saved_model.save(lowercase__ ,lowercase__ ,signatures={'''serving_default''': model.serving} )
                __lowercase = tf.saved_model.load(lowercase__ )
                __lowercase = loaded_model.signatures['''serving_default'''](lowercase__ )['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # get_config / from_config round-trip must preserve outputs.
        for tf_tokenizer in self.tf_tokenizers:
            __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
            __lowercase = tf_tokenizer(lowercase__ )  # Build model with some sample inputs
            __lowercase = tf_tokenizer.get_config()
            __lowercase = TFGPTaTokenizer.from_config(lowercase__ )
            __lowercase = model_from_config(lowercase__ )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # max_length must cap the produced sequence length exactly.
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            __lowercase = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                __lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
                __lowercase = tf_tokenizer(lowercase__ ,max_length=lowercase__ )
                __lowercase = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 624 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase_ :
    """Gradient-preserving CLIP preprocessor: tokenizes text with a CLIP
    tokenizer and applies differentiable torchvision transforms (resize to
    224, center crop, CLIP normalization) to image tensors, so gradients can
    flow back to the image.

    NOTE(review): the ``__lowercase = …`` lines look like mangled attribute
    assignments (``self.device``, ``self.tokenizer``, ``self.image_mean`` …);
    confirm against the attributes the other methods read.
    """
    def __init__( self : int ,lowercase__ : str = "cpu" ,lowercase__ : str = "openai/clip-vit-large-patch14" ):
        __lowercase = device
        __lowercase = CLIPTokenizerFast.from_pretrained(lowercase__ )
        # CLIP's published channel-wise normalization statistics.
        __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
        __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
        __lowercase = torchvision.transforms.Normalize(self.image_mean ,self.image_std )
        __lowercase = torchvision.transforms.Resize(2_2_4 )
        __lowercase = torchvision.transforms.CenterCrop(2_2_4 )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ):
        # Resize -> center-crop -> normalize, all differentiable.
        __lowercase = self.resize(lowercase__ )
        __lowercase = self.center_crop(lowercase__ )
        __lowercase = self.normalize(lowercase__ )
        return images
    def __call__( self : Dict ,lowercase__ : str=None ,lowercase__ : Optional[int]=None ,**lowercase__ : List[str] ):
        # Tokenize the text, preprocess the images, and move everything to
        # the configured device.
        __lowercase = self.tokenizer(text=lowercase__ ,**lowercase__ )
        __lowercase = self.preprocess_img(lowercase__ )
        __lowercase = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class lowercase_ (nn.Module ):
    """VQGAN+CLIP latent-space image editor.

    Optimizes an additive vector in a VQGAN latent space so that the decoded
    image maximizes CLIP similarity to positive text prompts (and minimizes it
    for negative prompts).  Supports optional wandb logging, intermediate
    frame saving, and GIF assembly of the optimization trajectory.

    NOTE(review): throughout this class ``__lowercase = …`` lines look like
    mangled attribute/variable assignments (``self.vqgan``, ``self.clip``,
    ``self.latent`` …) and several bodies read names that differ from declared
    parameters — renaming artifacts; confirm against the attributes consumed.
    """
    def __init__( self : Optional[Any] ,lowercase__ : str=1_0 ,lowercase__ : str=0.0_1 ,lowercase__ : Optional[int]=None ,lowercase__ : Optional[Any]=None ,lowercase__ : List[str]=None ,lowercase__ : Any=None ,lowercase__ : Optional[int]=None ,lowercase__ : str=None ,lowercase__ : List[str]=False ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[int]="image" ,lowercase__ : Tuple=True ,lowercase__ : int=False ,lowercase__ : Tuple=False ,lowercase__ : Optional[Any]=False ,):
        super().__init__()
        __lowercase = None
        __lowercase = device if device else get_device()
        # Use caller-supplied models when given, else load defaults.
        if vqgan:
            __lowercase = vqgan
        else:
            __lowercase = load_vqgan(self.device ,conf_path=lowercase__ ,ckpt_path=lowercase__ )
        self.vqgan.eval()
        if clip:
            __lowercase = clip
        else:
            __lowercase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        __lowercase = ProcessorGradientFlow(device=self.device )
        __lowercase = iterations
        __lowercase = lr
        __lowercase = log
        __lowercase = make_grid
        __lowercase = return_val
        __lowercase = quantize
        __lowercase = self.vqgan.decoder.z_shape
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[int]=None ,lowercase__ : str=None ,lowercase__ : Dict=5 ,lowercase__ : Tuple=True ):
        # Assemble the saved intermediate PNG frames into an animated GIF.
        __lowercase = []
        if output_path is None:
            __lowercase = '''./animation.gif'''
        if input_path is None:
            __lowercase = self.save_path
        __lowercase = sorted(glob(input_path + '''/*''' ) )
        if not len(lowercase__ ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(lowercase__ ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        __lowercase = total_duration / len(lowercase__ )
        __lowercase = [frame_duration] * len(lowercase__ )
        if extend_frames:
            # Hold the first and last frames longer for readability.
            __lowercase = 1.5
            __lowercase = 3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(lowercase__ ) )
        imageio.mimsave(lowercase__ ,lowercase__ ,duration=lowercase__ )
        print(F"gif saved to {output_path}" )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Any=None ,lowercase__ : Tuple=None ):
        # Encode an image file into the VQGAN latent space.
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        __lowercase = preprocess(Image.open(lowercase__ ) ,target_image_size=2_5_6 ).to(self.device )
        __lowercase = preprocess_vqgan(lowercase__ )
        __lowercase , *__lowercase = self.vqgan.encode(lowercase__ )
        return z
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Union[str, Any] ):
        # Add the learned transform vector to the base latent (optionally
        # re-quantizing) and decode back to image space.
        __lowercase = self.latent.detach().requires_grad_()
        __lowercase = base_latent + transform_vector
        if self.quantize:
            __lowercase , *__lowercase = self.vqgan.quantize(lowercase__ )
        else:
            __lowercase = trans_latent
        return self.vqgan.decode(lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict=None ):
        # Summed (optionally weighted) CLIP image-text similarity.
        __lowercase = self.clip_preprocessor(text=lowercase__ ,images=lowercase__ ,return_tensors='''pt''' ,padding=lowercase__ )
        __lowercase = self.clip(**lowercase__ )
        __lowercase = clip_outputs.logits_per_image
        if weights is not None:
            __lowercase = similarity_logits * weights
        return similarity_logits.sum()
    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ):
        # Loss = -log(sim to positive prompts) + log(sim to negative prompts).
        __lowercase = self._get_clip_similarity(pos_prompts['''prompts'''] ,lowercase__ ,weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            __lowercase = self._get_clip_similarity(neg_prompts['''prompts'''] ,lowercase__ ,weights=neg_prompts['''weights'''] )
        else:
            __lowercase = torch.tensor([1] ,device=self.device )
        __lowercase = -torch.log(lowercase__ ) + torch.log(lowercase__ )
        return loss
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ):
        # Gradient-descend a random latent offset to minimize the CLIP loss,
        # yielding the decoded image (or the raw vector) each step.
        __lowercase = torch.randn_like(self.latent ,requires_grad=lowercase__ ,device=self.device )
        __lowercase = torch.optim.Adam([vector] ,lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            __lowercase = self._add_vector(lowercase__ )
            __lowercase = loop_post_process(lowercase__ )
            __lowercase = self._get_CLIP_loss(lowercase__ ,lowercase__ ,lowercase__ )
            print('''CLIP loss''' ,lowercase__ )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=lowercase__ )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[int] ,lowercase__ : int ,lowercase__ : List[str] ):
        # Initialize a wandb run and record the prompts/hyperparameters.
        wandb.init(reinit=lowercase__ ,project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            __lowercase = Image.open(lowercase__ )
            __lowercase = image.resize((2_5_6, 2_5_6) )
            wandb.log('''Original Image''' ,wandb.Image(lowercase__ ) )
    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
        # Parse prompts given as "a|b", [(text, weight)], or "text:weight"
        # into parallel prompt/weight tensors.
        if not prompts:
            return []
        __lowercase = []
        __lowercase = []
        if isinstance(lowercase__ ,lowercase__ ):
            __lowercase = [prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(lowercase__ ,(tuple, list) ):
                __lowercase = prompt[0]
                __lowercase = float(prompt[1] )
            elif ":" in prompt:
                __lowercase , __lowercase = prompt.split(''':''' )
                __lowercase = float(lowercase__ )
            else:
                __lowercase = prompt
                __lowercase = 1.0
            processed_prompts.append(lowercase__ )
            weights.append(lowercase__ )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(lowercase__ ,device=self.device ),
        }
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Tuple ,lowercase__ : Optional[int]=None ,lowercase__ : Optional[int]=None ,lowercase__ : List[Any]=True ,lowercase__ : List[str]=False ,lowercase__ : int=True ,lowercase__ : Optional[int]=True ,lowercase__ : Dict=None ,):
        # Main entry point: seed the latent (from an image or random noise),
        # process prompts, set up output paths, then run the optimization and
        # show/save each intermediate image.
        if image_path:
            __lowercase = self._get_latent(lowercase__ )
        else:
            __lowercase = torch.randn(self.latent_dim ,device=self.device )
        if self.log:
            self._init_logging(lowercase__ ,lowercase__ ,lowercase__ )
        assert pos_prompts, "You must provide at least one positive prompt."
        __lowercase = self.process_prompts(lowercase__ )
        __lowercase = self.process_prompts(lowercase__ )
        if save_final and save_path is None:
            __lowercase = os.path.join('''./outputs/''' ,'''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(lowercase__ ):
                os.makedirs(lowercase__ )
            else:
                # Avoid clobbering an existing run's outputs.
                __lowercase = save_path + '''_''' + get_timestamp()
                os.makedirs(lowercase__ )
            __lowercase = save_path
        __lowercase = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(lowercase__ ) )
        __lowercase = loop_post_process(lowercase__ )
        for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase__ ,lowercase__ ,lowercase__ ) ):
            if show_intermediate:
                show_pil(lowercase__ )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(lowercase__ )} )
        if show_final:
            show_pil(lowercase__ )
        if save_final:
            transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}_final.png" ) )
| 624 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# The three vertices of an equilateral triangle with unit base; the polyline
# is closed by repeating the first vertex at the end.
# NOTE(review): all four constants bind the same name ``lowerCAmelCase__``
# (renaming artifact — originally VECTOR_1..3 and INITIAL_VECTORS); confirm.
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_660_254])  # apex: (1/2, sqrt(3)/2)
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = initial_vectors
for _ in range(A__ ):
__lowercase = iteration_step(A__ )
return vectors
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
__lowercase = vectors[i + 1]
new_vectors.append(A__ )
__lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = numpy.radians(A__ )
__lowercase , __lowercase = numpy.cos(A__ ), numpy.sin(A__ )
__lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(A__ , A__ )
def _A ( A__ ):
    """Plot the polyline given by the 2-D vertices in ``A__`` with equal axes.

    Args:
        A__: iterable of 2-element vertices (x, y).

    Fixes over the previous revision: ``set_aspect`` was called on an
    undefined name ``axes`` and ``plt.plot`` received the vertex list twice
    instead of the unzipped x- and y-coordinate sequences.
    """
    axes = plt.gca()
    # Equal aspect so the snowflake is not distorted.
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*A__ )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Render five Koch iterations of the initial triangle.
    # NOTE(review): ``iterate``/``plot``/``processed_vectors``/``INITIAL_VECTORS``
    # don't match the names actually bound above — renaming artifacts; confirm.
    lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 624 | 1 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class lowercase_ :
    """Samples this process's resident-set size on a background thread so the
    peak CPU memory between ``start()`` and ``stop()`` can be reported.

    NOTE(review): results are bound to a local ``__lowercase`` where attribute
    assignments (``self.process``, ``self.peak_monitoring``,
    ``self.cpu_memory_peak``, ``self.thread``, ``self.thread.daemon``) appear
    intended — renaming artifacts; confirm against the attributes the methods
    read.
    """
    def __init__( self : List[Any] ):
        __lowercase = psutil.Process()
        __lowercase = False
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Busy-poll the RSS; deliberately no sleep so short-lived peaks are
        # not missed between samples.
        __lowercase = -1
        while True:
            __lowercase = max(self.process.memory_info().rss ,self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Flip the monitoring flag and launch the sampler thread (daemonized
        # so it never blocks interpreter exit).
        __lowercase = True
        __lowercase = threading.Thread(target=self.peak_monitor )
        __lowercase = True
        self.thread.start()
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # Signal the sampler loop to exit, join it, and return the peak seen.
        __lowercase = False
        self.thread.join()
        return self.cpu_memory_peak
# Module-level tracker shared by the start/stop measurement helpers below.
# NOTE(review): the helpers read ``cpu_peak_tracker`` while this binds
# ``lowerCAmelCase__`` — renaming artifact; confirm.
lowerCAmelCase__ = PeakCPUMemory()
def _A ( ):
    """Take a "start" snapshot of wall-clock time, CPU RSS and per-GPU memory.

    Also starts the module-level CPU peak tracker and resets CUDA peak stats
    so a later "end" snapshot can report deltas and peaks.

    Returns:
        dict with ``'time'`` (epoch seconds), ``'cpu'`` (RSS in bytes) and
        one ``str(gpu_index)`` key per CUDA device (allocated bytes).

    Fixes over the previous revision: every result was bound to a throwaway
    local and the function returned an undefined name ``measures``
    (NameError); the CUDA calls also never received the loop index.
    """
    measures = {'''time''': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['''cpu'''] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def _A ( A__ ):
    """Compute elapsed time and memory deltas relative to a "start" snapshot.

    Args:
        A__: the dict returned by the start-measure helper (keys ``'time'``,
            ``'cpu'`` and ``str(gpu_index)``).

    Returns:
        dict with ``'time'`` in seconds and ``'cpu'``/``'cpu-peak'`` plus
        per-GPU ``str(i)``/``f"{i}-peak"`` entries, all in MiB — matching the
        keys read by the print helper below.

    Fixes over the previous revision: results were bound to a throwaway local
    and the function returned an undefined name ``measures`` (NameError).
    """
    measures = {'''time''': time.time() - A__['''time''']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (MiB)
    measures['''cpu'''] = (psutil.Process().memory_info().rss - A__['''cpu''']) / 2**20
    measures['''cpu-peak'''] = (cpu_peak_tracker.stop() - A__['''cpu''']) / 2**20
    # GPU mem (MiB): both current allocation delta and the CUDA-reported peak.
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - A__[str(i )]) / 2**20
        measures[F"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - A__[str(i )]) / 2**20
    return measures
def _A ( A__ , A__ ):
"""simple docstring"""
print(F"{description}:" )
print(F"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(F"- GPU {i} allocated: {measures[str(A__ )]:.2f}MiB" )
__lowercase = measures[F"{i}-peak"]
print(F"- GPU {i} peak: {peak:.2f}MiB" )
print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 624 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding: map submodule names to their public symbols so the
# heavy model code is only imported on first attribute access.
# NOTE(review): the torch/TF symbol lists below rebind ``lowerCAmelCase__``
# instead of extending the import-structure dict — renaming artifacts of the
# usual ``_import_structure[...] = [...]`` pattern; confirm.
lowerCAmelCase__ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: skip registering the PyTorch model classes.
    pass
else:
    lowerCAmelCase__ = [
        '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMAEForPreTraining''',
        '''ViTMAELayer''',
        '''ViTMAEModel''',
        '''ViTMAEPreTrainedModel''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: skip registering the TF model classes.
    pass
else:
    lowerCAmelCase__ = [
        '''TFViTMAEForPreTraining''',
        '''TFViTMAEModel''',
        '''TFViTMAEPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below is installed instead.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys
    # Replace this module with a lazy proxy that imports on demand.
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowercase_ (unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint``: input validation (ints only,
    no nested subsets) and the step-by-step ``update`` protocol, which returns
    a ``(stepped, completed, reset)`` triple per generated token.
    """
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __lowercase = [[1, 2, 4], [1, 2, 3, 4]]
        __lowercase = DisjunctiveConstraint(lowercase__ )
        self.assertTrue(isinstance(dc.token_ids ,lowercase__ ) )
        # Tensors (whole or per-branch) must be rejected.
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __lowercase = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint(lowercase__ )  # fails here
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Walk the shorter branch [1, 2, 3] token by token and check the
        # (stepped, completed, reset) flags after each update.
        __lowercase = [[1, 2, 3], [1, 2, 4]]
        __lowercase = DisjunctiveConstraint(lowercase__ )
        __lowercase , __lowercase , __lowercase = dc.update(1 )
        __lowercase = stepped is True and completed is False and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowercase , __lowercase , __lowercase = dc.update(2 )
        __lowercase = stepped is True and completed is False and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowercase , __lowercase , __lowercase = dc.update(3 )
        __lowercase = stepped is True and completed is True and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Complete the long branch, then reset and complete the short one,
        # checking ``remaining`` along the way.
        __lowercase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __lowercase = DisjunctiveConstraint(lowercase__ )
        __lowercase , __lowercase , __lowercase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowercase , __lowercase , __lowercase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowercase , __lowercase , __lowercase = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        __lowercase , __lowercase , __lowercase = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        __lowercase , __lowercase , __lowercase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        __lowercase , __lowercase , __lowercase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowercase , __lowercase , __lowercase = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 624 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
lowerCAmelCase__ = (720, 1280)  # Height, Width
lowerCAmelCase__ = (0.4, 0.6)  # if height or width lower than this scale, drop it.
lowerCAmelCase__ = 1 / 100
# NOTE(review): the constant names were lost in a mechanical rewrite — all
# assignments rebind the same placeholder, so only the last survives.  Later
# code refers to OUTPUT_DIR and NUMBER_IMAGES (see the f-strings in main), and
# presumably the empty strings were LABEL_DIR / IMG_DIR / OUTPUT_DIR paths —
# restore the original names before running.
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = 250
def _A ( ):
    """Generate mosaic-augmented images plus matching label text files
    (one ``class x_center y_center width height`` line per box).

    NOTE(review): argument names were lost in a mechanical rewrite — the
    `A__` placeholders and `__lowercase` targets below do not resolve;
    compare with the names used afterwards (`path`, `new_annos`,
    `annos_list`, ...) to restore them.
    """
    __lowercase , __lowercase = get_dataset(A__ , A__ )
    for index in range(A__ ):
        # pick four distinct source images for one mosaic
        __lowercase = random.sample(range(len(A__ ) ) , 4 )
        __lowercase , __lowercase , __lowercase = update_image_and_anno(
            A__ , A__ , A__ , A__ , A__ , filter_scale=A__ , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __lowercase = random_chars(32 )
        __lowercase = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __lowercase = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(F"{file_root}.jpg" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        __lowercase = []
        for anno in new_annos:
            # convert corner coordinates back to centre/size form
            __lowercase = anno[3] - anno[1]
            __lowercase = anno[4] - anno[2]
            __lowercase = anno[1] + width / 2
            __lowercase = anno[2] + height / 2
            __lowercase = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(A__ )
        with open(F"{file_root}.txt" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = []
for label_file in glob.glob(os.path.join(A__ , '''*.txt''' ) ):
__lowercase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(A__ ) as in_file:
__lowercase = in_file.readlines()
__lowercase = os.path.join(A__ , F"{label_name}.jpg" )
__lowercase = []
for obj_list in obj_lists:
__lowercase = obj_list.rstrip('''\n''' ).split(''' ''' )
__lowercase = float(obj[1] ) - float(obj[3] ) / 2
__lowercase = float(obj[2] ) - float(obj[4] ) / 2
__lowercase = float(obj[1] ) + float(obj[3] ) / 2
__lowercase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def _A(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """
    Stitch four images (chosen by ``idxs``) into one mosaic image.

    The mosaic is split at a random point whose relative position is drawn
    from ``scale_range``; each quadrant receives one resized source image and
    its boxes are rescaled into mosaic-relative coordinates.

    :param all_img_list: list of image file paths
    :param all_annos: per-image box lists ``[class, xmin, ymin, xmax, ymax]``
        (relative coordinates in [0, 1])
    :param idxs: four indices into the two lists above
    :param output_size: (height, width) of the mosaic
    :param scale_range: (low, high) for the random split point
    :param filter_scale: drop boxes whose width or height falls at/below this
    :return: (mosaic image, rescaled annotations, path of the first image)

    Fixes a mechanical rewrite that repeated the parameter name ``A__``
    (a SyntaxError), erased the quadrant slice-assignment targets, and
    misspelled the dtype (``np.uinta``).
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def _A ( A__ ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__lowercase = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 624 | 1 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
__lowercase = len(bin(A__ )[3:] )
__lowercase = bin(abs(A__ ) - (1 << binary_number_length) )[3:]
__lowercase = (
(
'''1'''
+ '''0''' * (binary_number_length - len(A__ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the three module constants below were all renamed to the same
# placeholder by a mechanical rewrite, so each assignment overwrites the
# previous one.  The class below reads them as VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES —
# restore those names before use.
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.model'''}
lowerCAmelCase__ = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}
lowerCAmelCase__ = {
    '''google/rembert''': 256,
}
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-backed slow tokenizer (RemBERT-style).

    NOTE(review): identifiers were scrambled by a mechanical rewrite (class
    name, class attributes and all methods read `lowercase_` /
    `SCREAMING_SNAKE_CASE`, locals read `__lowercase`).  Method bodies are
    preserved byte-for-byte; only documentation was added.
    """

    # Resource maps consumed by the PreTrainedTokenizer machinery.
    SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : str ,lowercase__ : Optional[Any] ,lowercase__ : List[str]=False ,lowercase__ : Dict=True ,lowercase__ : List[str]=True ,lowercase__ : Dict="[CLS]" ,lowercase__ : Union[str, Any]="[SEP]" ,lowercase__ : List[str]="[UNK]" ,lowercase__ : int="[SEP]" ,lowercase__ : List[str]="[PAD]" ,lowercase__ : Optional[int]="[CLS]" ,lowercase__ : List[Any]="[MASK]" ,**lowercase__ : int ,):
        # Forward special tokens / casing flags to the base class, then load
        # the SentencePiece model from the vocab file.
        super().__init__(
            do_lower_case=lowercase__ ,remove_space=lowercase__ ,keep_accents=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,pad_token=lowercase__ ,cls_token=lowercase__ ,mask_token=lowercase__ ,**lowercase__ ,)
        __lowercase = do_lower_case
        __lowercase = remove_space
        __lowercase = keep_accents
        __lowercase = vocab_file
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(lowercase__ )

    @property
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Vocabulary size == number of pieces in the SentencePiece model.
        return len(self.sp_model )

    def SCREAMING_SNAKE_CASE ( self : str ):
        # Full token -> id mapping, including tokens added after loading.
        __lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : List[Any] ):
        # The SentencePiece processor is dropped for pickling and reloaded
        # from `self.vocab_file` in __setstate__.
        __lowercase = self.__dict__.copy()
        __lowercase = None
        return state

    def __setstate__( self : str ,lowercase__ : Optional[int] ):
        __lowercase = d
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ,lowercase__ : List[Any]=False ):
        # Tokenize by delegating to SentencePiece piece segmentation.
        __lowercase = self.sp_model.EncodeAsPieces(lowercase__ )
        return pieces

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ):
        # token (piece) -> id
        return self.sp_model.PieceToId(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
        # id -> token (piece)
        return self.sp_model.IdToPiece(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
        # Detokenize a sequence of pieces back into a string.
        __lowercase = self.sp_model.decode_pieces(lowercase__ )
        return out_string

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP].
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
        # Mask with 1 for special tokens ([CLS]/[SEP]) and 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
        return [1] + ([0] * len(lowercase__ )) + [1]

    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Token-type ids: 0 for the first segment (incl. specials), 1 for the second.
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
        # Persist the vocabulary by copying the SentencePiece model file into
        # the target directory (unless it is already there).
        if not os.path.isdir(lowercase__ ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(lowercase__ ) )
            return
        __lowercase = os.path.join(
            lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file ,lowercase__ )
        return (out_vocab_file,)
| 624 | 1 |
'''simple docstring'''
from itertools import product
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = sides_number
__lowercase = max_face_number * dice_number
__lowercase = [0] * (max_total + 1)
__lowercase = 1
__lowercase = range(A__ , max_face_number + 1 )
for dice_numbers in product(A__ , repeat=A__ ):
__lowercase = sum(A__ )
totals_frequencies[total] += 1
return totals_frequencies
def _A ( ):
"""simple docstring"""
__lowercase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__lowercase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__lowercase = 0
__lowercase = 9
__lowercase = 4 * 9
__lowercase = 6
for peter_total in range(A__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__lowercase = (4**9) * (6**6)
__lowercase = peter_wins_count / total_games_number
__lowercase = round(A__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
| 624 |
'''simple docstring'''
def _A ( A__ = 1000000 ):
"""simple docstring"""
__lowercase = set(range(3 , A__ , 2 ) )
primes.add(2 )
for p in range(3 , A__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , A__ , A__ ) ) )
__lowercase = [float(A__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A__ , limit + 1 , A__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
# Emit INFO-level progress messages while converting checkpoints.
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _A ( A__ , A__=False ):
"""simple docstring"""
__lowercase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowercase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _A ( A__ , A__ , A__=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
__lowercase = ''''''
else:
__lowercase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
__lowercase = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[
: config.hidden_size, :
]
__lowercase = in_proj_bias[: config.hidden_size]
__lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowercase = in_proj_weight[
-config.hidden_size :, :
]
__lowercase = in_proj_bias[-config.hidden_size :]
def _A ( A__ ):
"""simple docstring"""
__lowercase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = dct.pop(A__ )
__lowercase = val
def _A():
    """
    Download the standard COCO validation image (id 39769) used to
    sanity-check the converted model, and return it as a PIL image.

    Fixes a mechanical rewrite: the body referenced the undefined
    placeholder ``A__`` instead of the url (and ``stream=True``).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _A ( A__ , A__ , A__=True ):
    """
    Convert a DINO ViT checkpoint (fetched via torch.hub) to the HuggingFace
    ViT format, verify its outputs against the original model, and save the
    model plus image processor.

    NOTE(review): this function was damaged by a mechanical rewrite — the
    parameter list repeats the placeholder `A__` (invalid Python) and the
    assignment targets all read `__lowercase`, while later lines refer to the
    intended names (`model_name`, `base_model`, `config`, `model`, ...).
    Only documentation was added here; the code is preserved as found and
    must have its names restored before it can run.
    """
    __lowercase = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        __lowercase = 8
    # set labels if required
    if not base_model:
        __lowercase = 1000
        __lowercase = '''huggingface/label-files'''
        __lowercase = '''imagenet-1k-id2label.json'''
        __lowercase = json.load(open(hf_hub_download(A__ , A__ , repo_type='''dataset''' ) , '''r''' ) )
        __lowercase = {int(A__ ): v for k, v in idalabel.items()}
        __lowercase = idalabel
        __lowercase = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        __lowercase = 384
        __lowercase = 1536
        __lowercase = 12
        __lowercase = 6
    # load original model from torch hub
    __lowercase = torch.hub.load('''facebookresearch/dino:main''' , A__ )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    __lowercase = original_model.state_dict()
    if base_model:
        remove_classification_head_(A__ )
    __lowercase = create_rename_keys(A__ , base_model=A__ )
    for src, dest in rename_keys:
        rename_key(A__ , A__ , A__ )
    read_in_q_k_v(A__ , A__ , A__ )
    # load HuggingFace model
    if base_model:
        __lowercase = ViTModel(A__ , add_pooling_layer=A__ ).eval()
    else:
        __lowercase = ViTForImageClassification(A__ ).eval()
    model.load_state_dict(A__ )
    # Check outputs on an image, prepared by ViTImageProcessor
    __lowercase = ViTImageProcessor()
    __lowercase = image_processor(images=prepare_img() , return_tensors='''pt''' )
    __lowercase = encoding['''pixel_values''']
    __lowercase = model(A__ )
    if base_model:
        __lowercase = original_model(A__ )
        assert torch.allclose(A__ , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        __lowercase = original_model(A__ )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(A__ , outputs.logits , atol=1e-3 )
    Path(A__ ).mkdir(exist_ok=A__ )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(A__ )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(A__ )
if __name__ == "__main__":
    # NOTE(review): the parser/args names were lost in a mechanical rewrite —
    # both are assigned to `lowerCAmelCase__` but referenced below as
    # `parser` / `args`; restore the names before running the script.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''dino_vitb16''',
        type=str,
        help='''Name of the model trained with DINO you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--base_model''',
        action='''store_true''',
        help='''Whether to only convert the base model (no projection head weights).''',
    )
    parser.set_defaults(base_model=True)
    lowerCAmelCase__ = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 624 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase_ (lowerCamelCase__ ):
    """Shared test harness that exercises a config class: property access,
    JSON / save-load round-trips, num_labels handling and kwargs handling.

    NOTE(review): identifiers were scrambled by a mechanical rewrite (all
    methods read `SCREAMING_SNAKE_CASE`, locals read `__lowercase`); the
    bodies are preserved byte-for-byte, only documentation was added.  The
    method called last shows the intended public method names.
    """

    def __init__( self : Optional[Any] ,lowercase__ : int ,lowercase__ : List[str]=None ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Dict ):
        # Store the outer test case, the config class under test, whether the
        # model has a text modality (adds `vocab_size` to the expected
        # properties) and the kwargs used to instantiate configs.
        __lowercase = parent
        __lowercase = config_class
        __lowercase = has_text_modality
        __lowercase = kwargs
        __lowercase = common_properties

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Common properties must exist as getters, be settable, and be
        # accepted as constructor keyword arguments.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = (
            ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(lowercase__ ,lowercase__ ) ,msg=F"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(lowercase__ ):
            try:
                setattr(lowercase__ ,lowercase__ ,lowercase__ )
                self.parent.assertEqual(
                    getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(lowercase__ ):
            try:
                __lowercase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(lowercase__ ,lowercase__ ) ,lowercase__ ,msg=F"`{name} value {idx} expected, but was {getattr(lowercase__ ,lowercase__ )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def SCREAMING_SNAKE_CASE ( self : str ):
        # Every constructor input must survive a to_json_string() round-trip.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] ,lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : int ):
        # to_json_file / from_json_file round-trip preserves the config.
        __lowercase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase = os.path.join(lowercase__ ,'''config.json''' )
            config_first.to_json_file(lowercase__ )
            __lowercase = self.config_class.from_json_file(lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # save_pretrained / from_pretrained round-trip preserves the config.
        __lowercase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(lowercase__ )
            __lowercase = self.config_class.from_pretrained(lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Round-trip also works when the config is saved into a subfolder.
        __lowercase = self.config_class(**self.inputs_dict )
        __lowercase = '''test'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase = os.path.join(lowercase__ ,lowercase__ )
            config_first.save_pretrained(lowercase__ )
            __lowercase = self.config_class.from_pretrained(lowercase__ ,subfolder=lowercase__ )
        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # num_labels drives the size of the id/label maps, also on update.
        __lowercase = self.config_class(**self.inputs_dict ,num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) ,5 )
        self.parent.assertEqual(len(config.labelaid ) ,5 )
        __lowercase = 3
        self.parent.assertEqual(len(config.idalabel ) ,3 )
        self.parent.assertEqual(len(config.labelaid ) ,3 )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # Non-composite configs must be constructible without any arguments.
        if self.config_class.is_composition:
            return
        __lowercase = self.config_class()
        self.parent.assertIsNotNone(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Every kwarg in `config_common_kwargs` must be honoured by __init__;
        # collect mismatches and fail with one aggregated message.
        __lowercase = copy.deepcopy(lowercase__ )
        __lowercase = self.config_class(**lowercase__ )
        __lowercase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
            elif getattr(lowercase__ ,lowercase__ ) != value:
                wrong_values.append((key, getattr(lowercase__ ,lowercase__ ), value) )
        if len(lowercase__ ) > 0:
            __lowercase = '''\n'''.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Run the whole suite above.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 624 | 1 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ , A__ , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
import re
def _A ( A__ ):
"""simple docstring"""
__lowercase = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A__ , A__ ) )
if __name__ == "__main__":
lowerCAmelCase__ = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 624 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# Map from model identifier to the canonical hosted config file.
lowerCAmelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowercase_ (lowerCamelCase__ ):
    """Configuration class for the CTRL model (``model_type='ctrl'``).

    Stores the hyper-parameters assigned in ``__init__`` and forwards any
    remaining keyword arguments to the base config class.
    """

    SCREAMING_SNAKE_CASE : Any = 'ctrl'
    # presumably keys ignored at inference time — confirm against the base
    # PretrainedConfig contract.
    SCREAMING_SNAKE_CASE : str = ['past_key_values']
    # Canonical attribute names mapped onto this config's short names.
    SCREAMING_SNAKE_CASE : int = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self : int ,lowercase__ : Any=2_4_6_5_3_4 ,lowercase__ : Optional[int]=2_5_6 ,lowercase__ : int=1_2_8_0 ,lowercase__ : Optional[int]=8_1_9_2 ,lowercase__ : Any=4_8 ,lowercase__ : Dict=1_6 ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : int=1e-6 ,lowercase__ : Tuple=0.0_2 ,lowercase__ : int=True ,**lowercase__ : List[str] ,):
        # NOTE(review): parameter names were lost in a mechanical rewrite —
        # the assignments below still reference the intended names
        # (vocab_size, n_positions, n_embd, n_layer, n_head, dff, ...);
        # restore them in the signature before use.
        __lowercase = vocab_size
        __lowercase = n_positions
        __lowercase = n_embd
        __lowercase = n_layer
        __lowercase = n_head
        __lowercase = dff
        __lowercase = resid_pdrop
        __lowercase = embd_pdrop
        __lowercase = layer_norm_epsilon
        __lowercase = initializer_range
        __lowercase = use_cache
        super().__init__(**lowercase__ )
| 624 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowercase_ :
    """Dense matrix supporting +, unary -, -, scalar/matrix *, transpose and
    the Sherman-Morrison rank-one inverse update.

    NOTE(review): identifiers were scrambled by a mechanical rewrite —
    locals read `__lowercase` while later lines use the intended names
    (`row`, `column`, `result`, ...).  Bodies are preserved byte-for-byte.
    """

    def __init__( self : Any ,lowercase__ : int ,lowercase__ : int ,lowercase__ : float = 0 ):
        # row x column grid initialised with `default_value`.
        __lowercase , __lowercase = row, column
        __lowercase = [[default_value for c in range(lowercase__ )] for r in range(lowercase__ )]

    def __str__( self : List[str] ):
        # Render with right-aligned, equal-width cells.
        __lowercase = F"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        __lowercase = 0
        for row_vector in self.array:
            for obj in row_vector:
                __lowercase = max(lowercase__ ,len(str(lowercase__ ) ) )
        __lowercase = F"%{max_element_length}s"

        # Make string and return
        def single_line(lowercase__ : list[float] ) -> str:
            nonlocal string_format_identifier
            __lowercase = '''['''
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line

        s += "\n".join(single_line(lowercase__ ) for row_vector in self.array )
        return s

    def __repr__( self : List[str] ):
        return str(self )

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : tuple[int, int] ):
        # True iff `loc` is a valid (row, column) index pair in range.
        if not (isinstance(lowercase__ ,(list, tuple) ) and len(lowercase__ ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self : Tuple ,lowercase__ : tuple[int, int] ):
        assert self.validate_indicies(lowercase__ )
        return self.array[loc[0]][loc[1]]

    def __setitem__( self : Tuple ,lowercase__ : tuple[int, int] ,lowercase__ : float ):
        assert self.validate_indicies(lowercase__ )
        __lowercase = value

    def __add__( self : List[Any] ,lowercase__ : Matrix ):
        # Element-wise sum; shapes must match.
        assert isinstance(lowercase__ ,lowercase__ )
        assert self.row == another.row and self.column == another.column
        # Add
        __lowercase = Matrix(self.row ,self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                __lowercase = self[r, c] + another[r, c]
        return result

    def __neg__( self : List[str] ):
        # Element-wise negation.
        __lowercase = Matrix(self.row ,self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                __lowercase = -self[r, c]
        return result

    def __sub__( self : str ,lowercase__ : Matrix ):
        return self + (-another)

    def __mul__( self : Dict ,lowercase__ : int | float | Matrix ):
        # Scalar multiplication or matrix product depending on the operand.
        if isinstance(lowercase__ ,(int, float) ): # Scalar multiplication
            __lowercase = Matrix(self.row ,self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    __lowercase = self[r, c] * another
            return result
        elif isinstance(lowercase__ ,lowercase__ ): # Matrix multiplication
            assert self.column == another.row
            __lowercase = Matrix(self.row ,another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            __lowercase = F"Unsupported type given for another ({type(lowercase__ )})"
            raise TypeError(lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Transpose.
        __lowercase = Matrix(self.column ,self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                __lowercase = self[r, c]
        return result

    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Matrix ,lowercase__ : Matrix ):
        # Sherman-Morrison: given self == A^-1 and column vectors u, v,
        # compute (A + u v^T)^-1; returns None when not invertible.
        assert isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ )
        assert self.row == self.column == u.row == v.row # u, v should be column vector
        assert u.column == v.column == 1 # u, v should be column vector
        # Calculate
        __lowercase = v.transpose()
        __lowercase = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def _A ( ):
        """Manual smoke test for the Sherman-Morrison update.

        NOTE(review): names were lost in a mechanical rewrite — the
        assignments read `__lowercase` / `A__` while the prints refer to
        `ainv`, `u` and `v`, and both functions below share the name `_A`
        (the second shadows the first); restore the names before running.
        """
        __lowercase = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            __lowercase = 1
        print(F"a^(-1) is {ainv}" )
        # u, v
        __lowercase = Matrix(3 , 1 , 0 )
        __lowercase , __lowercase , __lowercase = 1, 2, -3
        __lowercase = Matrix(3 , 1 , 0 )
        __lowercase , __lowercase , __lowercase = 4, -2, 5
        print(F"u is {u}" )
        print(F"v is {v}" )
        print(F"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(A__ , A__ )}" )

    def _A ( ):
        """Run the module doctests, then the smoke test above."""
        import doctest

        doctest.testmod()
        testa()
| 624 | 1 |
'''simple docstring'''
import math
def _A ( A__ ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _A ( A__ = 10001 ):
"""simple docstring"""
try:
__lowercase = int(A__ )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
__lowercase = []
__lowercase = 2
while len(A__ ) < nth:
if is_prime(A__ ):
primes.append(A__ )
num += 1
else:
num += 1
return primes[len(A__ ) - 1]
if __name__ == "__main__":
print(f'{solution() = }')
| 624 |
'''simple docstring'''
def _A ( A__ = 50 ):
"""simple docstring"""
__lowercase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 1 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase__ = 1.6_021e-19 # units = C
def _A ( A__ , A__ , A__ , ):
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
# Module-level logger for this conversion script.
lowerCAmelCase__ = logging.getLogger(__name__)
# Sample text with non-ASCII characters (used for tokenizer sanity checks).
# NOTE(review): this rebinding clobbers the logger assigned on the previous
# line — the obfuscation collapsed two distinct names; verify intent.
lowerCAmelCase__ = '''Hello world! cécé herlolip'''
# Configuration record for the authors' BertAbs model. The function below
# instantiates it as `BertAbsConfig(...)`, so it must be bound to that name
# (the obfuscated original bound it only to `lowerCAmelCase__`).
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
# Keep the obfuscated alias for any external references.
lowerCAmelCase__ = BertAbsConfig
def _A(path_to_checkpoints, dump_path):
    """Convert the authors' BertAbs checkpoint to the HuggingFace layout.

    Loads the original ``AbsSummarizer`` weights, copies them into
    ``BertAbsSummarizer``, verifies both models produce (nearly) identical
    outputs on a sample input, then saves the new model's state dict.

    NOTE(review): ``dump_path`` is unused — the state dict is saved to a
    hard-coded path below; confirm whether it should be used instead.
    """
    # Instantiate the authors' model with the pre-trained weights.
    # NOTE(review): the boolean flags below were lost in obfuscation and are
    # restored from the reference conversion script — confirm against upstream.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    # Map all storages to CPU regardless of where the checkpoint was saved.
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device('''cpu'''), config)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device('''cpu'''))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''')
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''')
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')

    # prepare the model inputs (pad to the encoder's 512 max positions)
    encoder_input_ids = tokenizer.encode('''This is sample éàalj\'-.''')
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode('''This is sample 3 éàalj\'-.''')
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''')
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''')

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''')
    torch.save(
        new_model.state_dict(), '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''')
if __name__ == "__main__":
    # Build the CLI: the original assigned the parser to an obfuscated name
    # and then referenced the undefined `parser`/`convert_bertabs_checkpoints`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--bertabs_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch dump.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the output PyTorch model.''',
    )
    args = parser.parse_args()
    # `_A` is the conversion entry point defined above.
    _A(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 624 | 1 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase_ (AbstractDatasetReader ):
    """Dataset reader for JSON / JSON-Lines files, backed by the `Json` builder.

    NOTE(review): reconstructed from an obfuscated original whose duplicated
    parameter names were a SyntaxError and whose base class name was erased;
    the base is restored to `AbstractDatasetReader` (imported above) and the
    attribute assignments from usage in the read method — confirm against
    upstream `datasets.io.json.JsonDatasetReader`.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalize a single path (or list of paths) to a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def SCREAMING_SNAKE_CASE(self):
        """Build and return the dataset.

        Method name kept from the obfuscated original (likely `read` upstream).
        """
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class lowercase_ :
    """Write a `Dataset` to JSON (JSON-Lines or other pandas orients).

    NOTE(review): reconstructed from an obfuscated original; attribute and
    helper names are restored from their use-sites (`self.dataset`,
    `self._write`, `self._batch_json` are all referenced below) — confirm
    against upstream `datasets.io.json.JsonDatasetWriter`.
    """

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs

    def SCREAMING_SNAKE_CASE(self):
        """Write the dataset and return the number of bytes written.

        Method name kept from the obfuscated original (likely `write` upstream).
        """
        _ = self.to_json_kwargs.pop('''path_or_buf''', None)
        orient = self.to_json_kwargs.pop('''orient''', '''records''')
        # JSON-Lines is the default for the "records" orient.
        lines = self.to_json_kwargs.pop('''lines''', True if orient == '''records''' else False)
        index = self.to_json_kwargs.pop('''index''', False if orient in ['''split''', '''table'''] else True)
        compression = self.to_json_kwargs.pop('''compression''', None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, '''wb''', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    ''' was passed. Please provide a local path instead.''')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        """Serialize one slice of the dataset to encoded JSON bytes."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('''\n'''):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write batches to `file_obj`, sequentially or via a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''',):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_bytes)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''',):
                    written += file_obj.write(json_bytes)
        return written
| 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Fallback stub for when Pillow is unavailable.
    # NOTE(review): the obfuscated names below do not shadow `Image`, so the
    # tests referencing `Image.open` / `Image.Image` would still NameError in
    # this branch; the upstream stub was presumably `class Image` with a
    # static `open(*args, **kwargs)` — verify before relying on it. The
    # duplicated `lowercase__` parameter is also a SyntaxError as written.
    class lowercase_ :
        """simple docstring"""
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *lowercase__ : Union[str, Any] ,**lowercase__ : Tuple ):
            pass
def _A ( A__ ):
"""simple docstring"""
__lowercase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase_ (unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): this block is machine-obfuscated: every method is named
    # `SCREAMING_SNAKE_CASE` (later defs shadow earlier ones in the class
    # dict), parameters are duplicated (`lowercase__` twice is a SyntaxError),
    # and bodies reference names (`depth_estimator`, `outputs`, `dataset`)
    # that the obfuscation erased. Restore the upstream transformers
    # depth-estimation pipeline test before running; annotated as-is below.

    # Presumably the pipeline test framework's `model_mapping` attribute —
    # TODO confirm against upstream.
    SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    # Presumably `get_test_pipeline(self, model, processor)`: builds the
    # pipeline under test plus two sample image paths.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : int ):
        __lowercase = DepthEstimationPipeline(model=lowercase__ ,image_processor=lowercase__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    # Presumably `run_pipeline_test(self, depth_estimator, examples)`: checks
    # the output schema for a single image and a heterogeneous batch.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ):
        __lowercase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,lowercase__ )
        import datasets
        __lowercase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        __lowercase = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,lowercase__ ,)

    # TF has no depth-estimation implementation; skipped intentionally.
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        pass

    # Slow smoke test against Intel/dpt-large; pins max/min predicted depth.
    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        __lowercase = '''Intel/dpt-large'''
        __lowercase = pipeline('''depth-estimation''' ,model=lowercase__ )
        __lowercase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        __lowercase = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.6_6_2 )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 624 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.