Dataset columns:

| column | dtype | values |
|---|---|---|
| code | string | 81 – 54k chars |
| code_codestyle | int64 | 0 – 721 |
| style_context | string | 91 – 41.9k chars |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute the GLUE evaluation metric associated with each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction is a label id (or a float score for the stsb subset).
    references: list of references, one per prediction.
        Each reference is a label id (or a float score for the stsb subset).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
--- code_codestyle: 56 ---
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images
    # Load trained model weights
    # from keras.models import load_model
    # regressor = load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` accepts generators directly; `fit_generator` was removed in TF 2.x
    classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
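The sigmoid head emits a probability, so the exact `== 0` / `== 1` comparisons above almost never fire, and the single image skips the 1/255 rescaling the generators applied during training. A hedged alternative for the prediction step, reusing the same hypothetical paths and the `classifier` trained above:

```python
import numpy as np
import tensorflow as tf

img = tf.keras.preprocessing.image.load_img(
    "dataset/single_prediction/image.png", target_size=(64, 64)
)
arr = tf.keras.preprocessing.image.img_to_array(img) / 255.0  # match the training rescale
prob = classifier.predict(np.expand_dims(arr, axis=0))[0][0]
prediction = "Abnormality detected" if prob >= 0.5 else "Normal"
print(prediction)
```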
--- style_context_codestyle: 56, label: 1 ---
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2; the sign of the velocity is irrelevant."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
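Two quick checks of the formula with invented values:

```python
assert kinetic_energy(10, 10) == 500.0   # 0.5 * 10 * 10**2
assert kinetic_energy(10, -10) == 500.0  # direction does not matter
```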
--- code_codestyle: 56 ---
# flake8: noqa
# Lint as: python3

__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
--- style_context_codestyle: 56, label: 1 ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
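A short sketch of what the lazy structure buys: importing the package stays cheap, and the torch-backed module is only loaded when an attribute is first resolved (assumes torch is installed):

```python
# Resolved through _LazyModule on first attribute access.
from transformers.models.clipseg import CLIPSegConfig

config = CLIPSegConfig()
print(config.model_type)  # "clipseg"
```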
--- code_codestyle: 56 ---
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the stored add_prefix_space flag disagrees
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build model inputs from a conversation, appending the EOS token after each turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
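A minimal usage sketch (fetches the published tokenizer files, so network access is assumed):

```python
from transformers import GPTNeoXTokenizerFast

tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
print(tok("Hello world")["input_ids"])
```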
--- style_context_codestyle: 56, label: 1 ---
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test__add__matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
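For orientation, a small interactive sketch of the library these tests exercise (assuming the package's `lib` module is importable):

```python
from lib import Matrix, Vector

v = Vector([1, 2, 3])
m = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
print(v * 2.0)          # componentwise scaling -> (2.0,4.0,6.0)
print(m.determinant())  # -5, as asserted in the tests above
```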
--- code_codestyle: 56 ---
(style_context cell: verbatim duplicate of the GLUE metric file shown at the top of this dump; omitted)
--- style_context_codestyle: 56, label: 1 ---
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run ``layer`` over ``inputs`` in ``chunk_size``-sized slices of the flattened batch dims."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        # Binary search for the largest chunk size that does not OOM
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
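A minimal sketch of `chunk_layer` in action, with an invented toy layer and shapes:

```python
import torch

def layer(x):
    return {"out": x * 2}

x = torch.randn(4, 128, 16)  # two batch dims (4, 128), flattened to 512 rows
out = chunk_layer(layer, {"x": x}, chunk_size=32, no_batch_dims=2)
print(out["out"].shape)  # torch.Size([4, 128, 16]), computed 32 rows at a time
```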
--- code_codestyle: 56 ---
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load the fairseq checkpoint and rename/split its weights to match transformers' OPT."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
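Programmatic use is a single call; the paths below are hypothetical:

```python
convert_opt_checkpoint(
    "checkpoints/opt-125m/restored.pt",  # fairseq-format checkpoint
    "converted/opt-125m",                # output folder for save_pretrained
    config=None,                         # None falls back to a default OPTConfig
)
```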
--- style_context_codestyle: 56, label: 1 ---
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as a prime plus
    twice a square (counterexamples to Goldbach's other conjecture)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest such odd composite."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
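A quick check against the well-known Project Euler 46 answer:

```python
assert solution() == 5777  # smallest counterexample to Goldbach's other conjecture
```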
--- code_codestyle: 56 ---
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
--- style_context_codestyle: 56, label: 1 ---
import json
import os

import torch

from diffusers import UNet1DModel

os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)

os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor: int):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
--- code_codestyle: 56 ---
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Unigram tokenizer mirroring SentencePiece defaults: NMT + NFKC normalization,
    whitespace collapsing, lowercasing, and Metaspace/digit/punctuation pre-tokenization."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model's unk_id is only settable through its serialized form
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
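A minimal training sketch on an invented in-memory corpus:

```python
tok = SentencePieceUnigramTokenizer()
tok.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=30)
print(tok.encode("hello world").tokens)  # ends with '</s>' via the post-processor
```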
--- style_context_codestyle: 56, label: 1 ---
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return products encoding every multiset of primes summing to
    ``number_to_partition`` (unique factorization makes the encoding lossless)."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer expressible as a sum of primes in more than
    ``number_unique_partitions`` ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
--- code_codestyle: 56 ---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
--- style_context_codestyle: 56, label: 1 ---
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_SCREAMING_SNAKE_CASE = 2_5_6_0_4_7
_SCREAMING_SNAKE_CASE = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Dict = NllbTokenizer
a : List[Any] = NllbTokenizerFast
a : Optional[int] = True
a : Optional[int] = True
a : List[str] = {}
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = NllbTokenizer(_lowerCamelCase ,keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = NllbTokenizer(_lowerCamelCase ,keep_accents=_lowerCamelCase )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
__lowercase = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
__lowercase = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = self.tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(_lowerCamelCase )
__lowercase = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__lowercase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_lowerCamelCase ,_lowerCamelCase )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(_lowerCamelCase )
__lowercase = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase ,_lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(_lowerCamelCase ,legacy_format=_lowerCamelCase )
__lowercase = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saves with the same files
self.assertSequenceEqual(_lowerCamelCase ,_lowerCamelCase )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(_lowerCamelCase )
__lowercase = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase ,_lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(_lowerCamelCase ,legacy_format=_lowerCamelCase )
__lowercase = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(_lowerCamelCase )
__lowercase = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase ,_lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
@require_torch
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
if not self.test_seqaseq:
return
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
__lowercase = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
__lowercase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
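# prepare_seqaseq_batch is optional tokenizer API; a NotImplementedError below is treated as a skip.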
try:
__lowercase = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCamelCase ,tgt_texts=_lowerCamelCase ,max_length=3 ,max_target_length=10 ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''' ,)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,10 )
# max_target_length will default to max_length if not specified
__lowercase = tokenizer.prepare_seqaseq_batch(
_lowerCamelCase ,tgt_texts=_lowerCamelCase ,max_length=3 ,return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,3 )
__lowercase = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCamelCase ,max_length=3 ,max_target_length=10 ,return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 )
self.assertNotIn('''decoder_input_ids''' ,_lowerCamelCase )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = [AddedToken('''<special>''' ,lstrip=_lowerCamelCase )]
__lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = tokenizer_r.encode('''Hey this is a <special> token''' )
__lowercase = tokenizer_r.encode('''<special>''' ,add_special_tokens=_lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = self.tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = tokenizer_p.encode('''Hey this is a <special> token''' )
__lowercase = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
a : int = "facebook/nllb-200-distilled-600M"
a : Union[str, Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
a : int = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
a : Any = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
def _UpperCAmelCase (cls ) -> Optional[Any]:
'''simple docstring'''
__lowercase = NllbTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''' )
__lowercase = 1
return cls
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] ,256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] ,256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] ,256057 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
self.assertIn(_lowerCamelCase ,self.tokenizer.all_special_ids )
# fmt: off
__lowercase = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
__lowercase = self.tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
__lowercase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] ,_lowerCamelCase )
__lowercase = 10
__lowercase = self.tokenizer(_lowerCamelCase ,max_length=_lowerCamelCase ,truncation=_lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1] ,2 )
self.assertEqual(ids[0] ,_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) ,[256203, 3] )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = NllbTokenizer.from_pretrained(_lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_lowerCamelCase )
@require_torch
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors='''pt''' ,)
__lowercase = shift_tokens_right(
batch['''labels'''] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual((2, 15) ,batch.input_ids.shape )
self.assertEqual((2, 15) ,batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=3 ,return_tensors='''pt''' )
__lowercase = self.tokenizer(
text_target=self.tgt_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=10 ,return_tensors='''pt''' )
__lowercase = targets['''input_ids''']
__lowercase = shift_tokens_right(
_lowerCamelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,)
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs(
'''A test''' ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(_lowerCamelCase ) ,{
# eng_Latn, A, test, EOS
'''input_ids''': [[256047, 70, 7356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# fra_Latn
'''forced_bos_token_id''': 256057,
} ,)
@require_torch
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = True
__lowercase = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids ,[16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
__lowercase = False
__lowercase = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids ,[256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
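# A line matching "_ [doctest]" starts a failure; its third field is the test name, and the
# first following line that does not start with a digit is kept as that test's error message.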
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
sorted_dict = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
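# Every directory in the current working directory is treated as one downloaded artifact.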
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple ):
# Load configuration defined in the metadata file
with open(lowerCamelCase_ ) as metadata_file:
__lowercase = json.load(lowerCamelCase_ )
__lowercase = LukeConfig(use_entity_aware_attention=lowerCamelCase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
# Load the entity vocab file
__lowercase = load_entity_vocab(lowerCamelCase_ )
__lowercase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
__lowercase = AddedToken('''<ent>''' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
__lowercase = AddedToken('''<ent2>''' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = LukeTokenizer.from_pretrained(lowerCamelCase_ )
# Initialize the embeddings of the special tokens
__lowercase = state_dict['''embeddings.word_embeddings.weight''']
__lowercase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
__lowercase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
__lowercase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
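# Each of the word-to-entity, entity-to-word and entity-to-entity query projections of the
# entity-aware self-attention starts from the standard query weights of the same layer.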
__lowercase = f"encoder.layer.{layer_index}.attention.self."
__lowercase = state_dict[prefix + matrix_name]
__lowercase = state_dict[prefix + matrix_name]
__lowercase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowercase = state_dict['''entity_embeddings.entity_embeddings.weight''']
__lowercase = entity_emb[entity_vocab['''[MASK]''']]
__lowercase = LukeModel(config=lowerCamelCase_ ).eval()
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
if not (len(lowerCamelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"Missing keys {', '.join(lowerCamelCase_ )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
__lowercase = LukeTokenizer.from_pretrained(lowerCamelCase_ , task='''entity_classification''' )
__lowercase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
__lowercase = (3_9, 4_2)
__lowercase = tokenizer(lowerCamelCase_ , entity_spans=[span] , add_prefix_space=lowerCamelCase_ , return_tensors='''pt''' )
__lowercase = model(**lowerCamelCase_ )
# Verify word hidden states
if model_size == "large":
__lowercase = torch.Size((1, 4_2, 1_0_2_4) )
__lowercase = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
__lowercase = torch.Size((1, 4_2, 7_6_8) )
__lowercase = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__lowercase = torch.Size((1, 1, 1_0_2_4) )
__lowercase = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
__lowercase = torch.Size((1, 1, 7_6_8) )
__lowercase = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowerCamelCase_ ) )
model.save_pretrained(lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = {}
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowerCamelCase_ ):
__lowercase , __lowercase = line.rstrip().split('''\t''' )
__lowercase = index
return entity_vocab
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
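# Trial division: with even numbers ruled out above, testing odd divisors from 3 up to sqrt(number) suffices.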
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : str = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = PILImageResampling.BICUBIC ,_lowerCamelCase = True ,_lowerCamelCase = True ,_lowerCamelCase = 1 / 255 ,_lowerCamelCase = None ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = size if size is not None else {'''height''': 224, '''width''': 224}
__lowercase = get_size_dict(_lowerCamelCase )
__lowercase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = do_normalize
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = size
__lowercase = resample
__lowercase = rescale_factor
__lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = PILImageResampling.BILINEAR ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase )
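# The target size may specify a shortest edge (keeping the aspect ratio) or an explicit height and width.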
if "shortest_edge" in size:
__lowercase = get_resize_output_image_size(_lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=_lowerCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__lowercase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' ,default_to_square=_lowerCamelCase )
__lowercase = resample if resample is not None else self.resample
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_lowerCamelCase )
if not is_batched(_lowerCamelCase ):
__lowercase = [images]
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=_lowerCamelCase ,size=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
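# Divide and conquer: the maximum subarray lies entirely in the left half, entirely in the
# right half, or crosses the midpoint.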
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
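# Scan left from mid for the best sum ending at mid, then right from mid + 1 for the best sum starting at mid + 1.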
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _lowerCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float ):
__lowercase = namedtuple('''result''' , '''name value''' )
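# Exactly one of voltage, current and power must be 0 (the unknown); it is solved from P = V * I.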
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir('''fixtures''')
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = mock.Mock()
__lowercase = 500
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Download this model to make sure it's in the cache.
__lowercase = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' ,return_value=_lowerCamelCase ) as mock_head:
__lowercase = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check ensures we did call the fake head request
mock_head.assert_called()
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _UpperCAmelCase (cls ) -> Optional[Any]:
'''simple docstring'''
__lowercase = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _UpperCAmelCase (cls ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' ,use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase ,getattr(_lowerCamelCase ,_lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase ,repo_id='''test-feature-extractor''' ,push_to_hub=_lowerCamelCase ,use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase ,getattr(_lowerCamelCase ,_lowerCamelCase ) )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' ,use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase ,getattr(_lowerCamelCase ,_lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase ,repo_id='''valid_org/test-feature-extractor-org''' ,push_to_hub=_lowerCamelCase ,use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase ,getattr(_lowerCamelCase ,_lowerCamelCase ) )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__lowercase = CustomFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map ,{'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} ,)
__lowercase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" ,trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ ,'''CustomFeatureExtractor''' )
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
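# Write the vocab and the feature extractor config to a temp dir so they can be re-loaded with from_pretrained.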
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure the extra decoder kwargs are applied when loading
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
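# Seed numpy so the dummy logits are reproducible across runs.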
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
        # test that only decoder-relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the locally cached files are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
    def get_from_offsets(offsets ,key ) -> Optional[Any]:
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
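
# A small numeric sketch of the offset-to-seconds conversion used in the test
# above: time_offset = inputs_to_logits_ratio / sampling_rate. The values below
# (320 samples per logit frame, 16 kHz audio) are the typical Wav2Vec2
# base-model settings, assumed here purely for illustration.
demo_time_offset = 320 / 16000
assert demo_time_offset == 0.02  # each logit frame spans 20 ms of audio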
'''simple docstring'''
def jaro_winkler( str_a : str , str_b : str ) -> float:
    def get_matched_characters(_stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(str_a , str_b )
    matching_b = get_matched_characters(str_b , str_a )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a )
                + match_count / len(str_b )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4] , str_b[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
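
# Quick sanity checks (a minimal sketch, not part of the original module):
# identical strings score 1.0, since match_count == len(str), there are no
# transpositions, and the prefix bonus is multiplied by (1 - jaro) == 0.
assert abs(jaro_winkler('''hello''', '''hello''') - 1.0) < 1E-12
assert abs(jaro_winkler('''world''', '''world''') - 1.0) < 1E-12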
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( BaseImageProcessor ):
'''simple docstring'''
a : int = ["pixel_values"]
    def __init__(self ,do_resize = True ,size_divisor = 32 ,resample=PILImageResampling.BILINEAR ,do_rescale = True ,**kwargs ,) -> None:
        '''simple docstring'''
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def resize(self ,image ,size_divisor ,resample ,data_format = None ,**kwargs ) -> np.ndarray:
        '''simple docstring'''
        height , width = get_image_size(image )
        # Round the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image ,(new_h, new_w) ,resample=resample ,data_format=data_format ,**kwargs )
        return image
    def rescale(self ,image ,scale ,data_format = None ,**kwargs ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image=image ,scale=scale ,data_format=data_format ,**kwargs )
    def preprocess(self ,images ,do_resize = None ,do_rescale = None ,size_divisor = None ,resample=None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image ,size_divisor=size_divisor ,resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image ,scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
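
# A minimal, self-contained sketch of the size-divisor rounding used by the
# image processor above: height and width are floored to the nearest multiple
# of `size_divisor` before resizing, so both output dimensions stay divisible
# by it. The `demo_*` names are illustrative, not part of the class.
def demo_round_to_divisor(height ,width ,size_divisor = 32 ):
    return (height // size_divisor * size_divisor, width // size_divisor * size_divisor)

assert demo_round_to_divisor(70 ,45 ) == (64, 32)
assert demo_round_to_divisor(64 ,32 ) == (64, 32)  # already-aligned sizes are unchanged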
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowercase :
'''simple docstring'''
pass
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class Node :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
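
# A quick numeric illustration of the two heuristics selectable above via
# HEURISTIC: Manhattan distance (|dx| + |dy|) versus Euclidean distance
# (sqrt(dx**2 + dy**2)), shown on a 3-4-5 right triangle.
assert abs(3 ) + abs(4 ) == 7      # Manhattan
assert sqrt(3**2 + 4**2 ) == 5.0   # Euclidean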
class AStar :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class BidirectionalAStar :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    bd_path = bd_a_star.search()
    bd_end_time = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
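
# Design note (a sketch, not part of the original file): the search loops above
# re-sort `open_nodes` and pop index 0 on every iteration. Since Node defines
# __lt__ on f_cost, a binary heap yields the same "cheapest first" order with
# O(log n) pushes/pops; plain integers stand in for nodes here.
import heapq

demo_open = [7, 3, 9, 1]
heapq.heapify(demo_open)
assert heapq.heappop(demo_open) == 1  # smallest element first, like sort() + pop(0)
heapq.heappush(demo_open, 2)
assert heapq.heappop(demo_open) == 2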
'''simple docstring'''
import random
class Onepad :
    '''simple docstring'''
    @staticmethod
    def encrypt(text ) -> tuple[list[int], list[int]]:
        '''Encrypt text: each char code i gets a random key k and becomes (i + k) * k.'''
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 ,300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt(cipher ,key ) -> str:
        '''Decrypt: recover each char code as (c - k**2) / k.'''
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
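
# Why decryption inverts encryption (a direct arithmetic check): each code
# point i becomes c = (i + k) * k = i*k + k**2, so (c - k**2) / k recovers i
# exactly. Illustrative values below; the real key is random per character.
demo_i, demo_k = ord('''H''') ,42
demo_c = (demo_i + demo_k) * demo_k
assert int((demo_c - demo_k**2) / demo_k ) == demo_i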
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
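
# A hedged usage sketch: besides the CLI entry point above, the converter can
# be invoked directly from Python. All four arguments below are placeholder
# paths/names, shown commented out so nothing runs on import.
# convert_saprl_checkpoint(
#     '''microsoft/unispeech-sat-base''',   # base_model_name (illustrative)
#     '''./classifier_config.json''',       # config_path (illustrative)
#     '''./s3prl_checkpoint.ckpt''',        # checkpoint_path (illustrative)
#     '''./converted_model''',              # model_dump_path (illustrative)
# )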
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
__lowercase = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention( idx , cnt ):
__lowercase = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token( idx ):
__lowercase = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def final( ):
__lowercase = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
__lowercase = '''imagenet-1k-id2label.json'''
__lowercase = 1_0_0_0
__lowercase = '''huggingface/label-files'''
__lowercase = num_labels
__lowercase = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) ) , '''r''' ) )
__lowercase = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = __lowercase = CvtConfig(num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__lowercase = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__lowercase = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2 + 2 + 20)
else:
__lowercase = [2, 2, 2_0]
__lowercase = [3, 1_2, 1_6]
__lowercase = [1_9_2, 7_6_8, 1_0_2_4]
__lowercase = CvtForImageClassification(lowerCamelCase_ )
__lowercase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__lowercase = image_size
__lowercase = torch.load(lowerCamelCase_ , map_location=torch.device('''cpu''' ) )
__lowercase = OrderedDict()
__lowercase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
for gg in list_of_state_dict:
print(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
__lowercase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
image_processor.save_pretrained(lowerCamelCase_ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint file to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
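
# A self-contained sketch of the depth lookup above: the two characters after
# "cvt-" select the per-stage depths (13 = 1 + 2 + 10, 21 = 1 + 4 + 16,
# anything else falls through to the wide 24 = 2 + 2 + 20 variant).
def demo_cvt_depths(cvt_model ):
    suffix = cvt_model.rsplit('''/''' ,1 )[-1][4:6]
    if suffix == "13":
        return [1, 2, 10]
    elif suffix == "21":
        return [1, 4, 16]
    return [2, 2, 20]

assert demo_cvt_depths('''microsoft/cvt-13''' ) == [1, 2, 10]
assert demo_cvt_depths('''cvt-w24''' ) == [2, 2, 20]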
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory( args : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand( BaseDatasetsCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser ) -> Any:
        '''simple docstring'''
        train_parser = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__(self ,tfds_path ,datasets_directory ,*args ) -> List[Any]:
        '''simple docstring'''
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
                    __lowercase = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_SCREAMING_SNAKE_CASE = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main( ):
__lowercase = Github(os.environ['''GITHUB_TOKEN'''] )
__lowercase = g.get_repo('''huggingface/accelerate''' )
__lowercase = repo.get_issues(state='''open''' )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
            # Close the issue, since there have been 7 days of inactivity since the bot's mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
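
# A tiny self-contained check of the staleness arithmetic above (illustrative
# dates; the real script compares issue timestamps against dt.utcnow()).
demo_now = dt(2023, 3, 31)
demo_updated = dt(2023, 3, 20)
assert (demo_now - demo_updated).days > 7  # would qualify as stale, all else equal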
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def get_dataset( args : DataTrainingArguments , tokenizer : PreTrainedTokenizer , evaluate : bool = False , cache_dir : Optional[str] = None , ):
    def _dataset(file_path : str , ref_path : Optional[str] = None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
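
# Perplexity, as computed in the evaluation branch of main() above, is simply
# exp(eval_loss). A one-line numeric sketch with an illustrative loss value:
assert abs(math.exp(2.0 ) - 7.38905609893065 ) < 1E-9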
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : List[str] = ShapEPipeline
a : str = ["prompt"]
a : Tuple = ["prompt"]
a : Optional[int] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
a : int = False
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return 8
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModelWithProjection(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__lowercase = PriorTransformer(**_lowerCamelCase )
return model
@property
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__lowercase = ShapERenderer(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.dummy_prior
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_renderer
__lowercase = HeunDiscreteScheduler(
            beta_schedule='''exp''' ,num_train_timesteps=1024 ,prediction_type='''sample''' ,use_karras_sigmas=True ,clip_sample=True ,clip_sample_range=1.0 ,)
__lowercase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(_lowerCamelCase ).startswith('''mps''' ):
__lowercase = torch.manual_seed(_lowerCamelCase )
else:
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
__lowercase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
        __lowercase = self.pipeline_class(**components )
        __lowercase = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        __lowercase = pipe(**self.get_dummy_inputs(device ) )
__lowercase = output.images[0]
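        # Inspect only a 3x3 corner patch of the last channel; comparing a small slice keeps the test cheap and deterministic.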
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowercase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = torch_device == '''cpu'''
__lowercase = True
self._test_inference_batch_single_identical(
            batch_size=2 ,test_max_difference=test_max_difference ,relax_max_difference=relax_max_difference ,)
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.get_dummy_components()
        __lowercase = self.pipeline_class(**components )
        __lowercase = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
__lowercase = 1
__lowercase = 2
        __lowercase = self.get_dummy_inputs(torch_device )
for key in inputs.keys():
if key in self.batch_params:
__lowercase = batch_size * [inputs[key]]
        __lowercase = pipe(**inputs ,num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__lowercase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        __lowercase = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        __lowercase = torch.Generator(device=torch_device ).manual_seed(0 )
__lowercase = pipe(
            '''a shark''' ,generator=generator ,guidance_scale=1_5.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='''np''' ,).images[0]
assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images ,expected_image )
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class __lowercase ( PretrainedConfig ):
'''simple docstring'''
a : Dict = "camembert"
def __init__(self ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=1 ,_lowerCamelCase=0 ,_lowerCamelCase=2 ,_lowerCamelCase="absolute" ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
class __lowercase ( OnnxConfig ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( GLPNImageProcessor ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' ,FutureWarning ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __lowercase ( PretrainedConfig ):
'''simple docstring'''
a : str = "instructblip_vision_model"
def __init__(self ,_lowerCamelCase=1408 ,_lowerCamelCase=6144 ,_lowerCamelCase=39 ,_lowerCamelCase=16 ,_lowerCamelCase=224 ,_lowerCamelCase=14 ,_lowerCamelCase="gelu" ,_lowerCamelCase=1E-6 ,_lowerCamelCase=0.0 ,_lowerCamelCase=1E-1_0 ,_lowerCamelCase=True ,**_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = qkv_bias
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( PretrainedConfig ):
'''simple docstring'''
a : Dict = "instructblip_qformer"
def __init__(self ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=0 ,_lowerCamelCase="absolute" ,_lowerCamelCase=2 ,_lowerCamelCase=1408 ,**_lowerCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = cross_attention_frequency
__lowercase = encoder_hidden_size
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
__lowercase = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( PretrainedConfig ):
'''simple docstring'''
a : List[str] = "instructblip"
a : str = True
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=32 ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if vision_config is None:
__lowercase = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
__lowercase = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
__lowercase = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
__lowercase = InstructBlipVisionConfig(**_lowerCamelCase )
__lowercase = InstructBlipQFormerConfig(**_lowerCamelCase )
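        # Fall back to OPT when the text backbone does not specify a model_type.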
__lowercase = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__lowercase = CONFIG_MAPPING[text_model_type](**_lowerCamelCase )
__lowercase = self.text_config.tie_word_embeddings
__lowercase = self.text_config.is_encoder_decoder
__lowercase = num_query_tokens
__lowercase = self.vision_config.hidden_size
__lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowercase = 1.0
__lowercase = 0.0_2
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.qformer_config.to_dict()
__lowercase = self.text_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 56
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
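        # Follow parent pointers until reaching a node that is its own parent (the component root).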
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
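        # Re-resolve the root for every node so the whole component points directly at it.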
if self.m_component[u_node] != u_node:
for k in self.m_component:
                __lowercase = self.find_component(k )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
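        # Union by size: attach the root of the smaller component to the root of the larger one.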
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
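            # One Boruvka pass: record the cheapest edge leaving each component, then merge along those edges.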
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
                if isinstance(edge ,list ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
                        self.union(component_size ,u_component ,v_component )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str ):
assert x is not None
assert y is not None
__lowercase = len(lowerCamelCase_ )
__lowercase = len(lowerCamelCase_ )
# declaring the array for storing the dp values
__lowercase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
__lowercase = 1 if x[i - 1] == y[j - 1] else 0
__lowercase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
__lowercase = ''''''
__lowercase , __lowercase = m, n
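    # Backtrack through the DP table to reconstruct one longest common subsequence.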
while i > 0 and j > 0:
__lowercase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__lowercase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = '''AGGTAB'''
_SCREAMING_SNAKE_CASE = '''GXTXAYB'''
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = '''GTAB'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
| 56
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
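    # test_image now carries a batch dimension: shape (1, 64, 64, 3), as the model expects.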
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
    # The sigmoid head outputs a probability in (0, 1), so threshold it rather than testing for exact 0 or 1.
    if result[0][0] <= 0.5:
        _SCREAMING_SNAKE_CASE = '''Normal'''
    else:
        _SCREAMING_SNAKE_CASE = '''Abnormality detected'''
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : int ):
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_ ) - ngram_size + 1 )]
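# Example (character n-grams): for sentence "abcde" and ngram_size 3 the result is ['abc', 'bcd', 'cde'].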
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 1
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __lowercase ( DiffusionPipeline ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__()
__lowercase = value_function
__lowercase = unet
__lowercase = scheduler
__lowercase = env
__lowercase = env.get_dataset()
__lowercase = {}
for key in self.data.keys():
try:
__lowercase = self.data[key].mean()
except: # noqa: E722
pass
__lowercase = {}
for key in self.data.keys():
try:
__lowercase = self.data[key].std()
except: # noqa: E722
pass
__lowercase = env.observation_space.shape[0]
__lowercase = env.action_space.shape[0]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if type(_lowerCamelCase ) is dict:
return {k: self.to_torch(_lowerCamelCase ) for k, v in x_in.items()}
elif torch.is_tensor(_lowerCamelCase ):
return x_in.to(self.unet.device )
return torch.tensor(_lowerCamelCase ,device=self.unet.device )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
for key, val in cond.items():
__lowercase = val.clone()
return x_in
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = x.shape[0]
__lowercase = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
__lowercase = torch.full((batch_size,) ,_lowerCamelCase ,device=self.unet.device ,dtype=torch.long )
for _ in range(_lowerCamelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
__lowercase = self.value_function(x.permute(0 ,2 ,1 ) ,_lowerCamelCase ).sample
__lowercase = torch.autograd.grad([y.sum()] ,[x] )[0]
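                # Guided sampling: scale the value gradient by the scheduler's posterior standard deviation before nudging x.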
__lowercase = self.scheduler._get_variance(_lowerCamelCase )
__lowercase = torch.exp(0.5 * posterior_variance )
__lowercase = model_std * grad
__lowercase = 0
__lowercase = x.detach()
__lowercase = x + scale * grad
__lowercase = self.reset_xa(_lowerCamelCase ,_lowerCamelCase ,self.action_dim )
__lowercase = self.unet(x.permute(0 ,2 ,1 ) ,_lowerCamelCase ).sample.permute(0 ,2 ,1 )
# TODO: verify deprecation of this kwarg
__lowercase = self.scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,predict_epsilon=_lowerCamelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
__lowercase = self.reset_xa(_lowerCamelCase ,_lowerCamelCase ,self.action_dim )
__lowercase = self.to_torch(_lowerCamelCase )
return x, y
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=64 ,_lowerCamelCase=32 ,_lowerCamelCase=2 ,_lowerCamelCase=0.1 ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.normalize(_lowerCamelCase ,'''observations''' )
__lowercase = obs[None].repeat(_lowerCamelCase ,axis=0 )
__lowercase = {0: self.to_torch(_lowerCamelCase )}
__lowercase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
__lowercase = randn_tensor(_lowerCamelCase ,device=self.unet.device )
__lowercase = self.reset_xa(_lowerCamelCase ,_lowerCamelCase ,self.action_dim )
__lowercase = self.to_torch(_lowerCamelCase )
# run the diffusion process
__lowercase , __lowercase = self.run_diffusion(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# sort output trajectories by value
__lowercase = y.argsort(0 ,descending=_lowerCamelCase ).squeeze()
__lowercase = x[sorted_idx]
__lowercase = sorted_values[:, :, : self.action_dim]
__lowercase = actions.detach().cpu().numpy()
__lowercase = self.de_normalize(_lowerCamelCase ,key='''actions''' )
# select the action with the highest value
if y is not None:
__lowercase = 0
else:
# if we didn't run value guiding, select a random action
__lowercase = np.random.randint(0 ,_lowerCamelCase )
__lowercase = denorm_actions[selected_index, 0]
return denorm_actions
| 56
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( PreTrainedTokenizerFast ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
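        # Keep only the most recent tokens when the flattened conversation exceeds the model context window.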
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
_SCREAMING_SNAKE_CASE = '''Muhammad Umer Farooq'''
_SCREAMING_SNAKE_CASE = '''MIT'''
_SCREAMING_SNAKE_CASE = '''1.0.0'''
_SCREAMING_SNAKE_CASE = '''Muhammad Umer Farooq'''
_SCREAMING_SNAKE_CASE = '''contact@muhammadumerfarooq.me'''
_SCREAMING_SNAKE_CASE = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __lowercase ( HTMLParser ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
super().__init__()
__lowercase = []
__lowercase = domain
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                        __lowercase = parse.urljoin(self.domain ,value )
                        self.urls.append(url )
def _lowerCAmelCase ( lowerCamelCase_ : str ):
return ".".join(get_sub_domain_name(lowerCamelCase_ ).split('''.''' )[-2:] )
def _lowerCAmelCase ( lowerCamelCase_ : str ):
return parse.urlparse(lowerCamelCase_ ).netloc
def _lowerCAmelCase ( lowerCamelCase_ : str = "https://github.com" ):
__lowercase = get_domain_name(lowerCamelCase_ )
# Initialize the parser
    __lowercase = Parser(domain )
try:
# Open URL
__lowercase = requests.get(lowerCamelCase_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__lowercase = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
            __lowercase = requests.get(link )
# Get the valid email.
__lowercase = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
                valid_emails.add(email )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = emails_from_url('''https://github.com''')
print(f'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
| 56
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ):
return float((preds == labels).mean() )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
__lowercase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__lowercase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __lowercase ( PretrainedConfig ):
'''simple docstring'''
a : List[Any] = "altclip_text_model"
def __init__(self ,_lowerCamelCase=250002 ,_lowerCamelCase=1024 ,_lowerCamelCase=24 ,_lowerCamelCase=16 ,_lowerCamelCase=4096 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=514 ,_lowerCamelCase=1 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-0_5 ,_lowerCamelCase=1 ,_lowerCamelCase=0 ,_lowerCamelCase=2 ,_lowerCamelCase="absolute" ,_lowerCamelCase=True ,_lowerCamelCase=768 ,**_lowerCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = project_dim
class __lowercase ( PretrainedConfig ):
'''simple docstring'''
a : Tuple = "altclip_vision_model"
def __init__(self ,_lowerCamelCase=768 ,_lowerCamelCase=3072 ,_lowerCamelCase=512 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3 ,_lowerCamelCase=224 ,_lowerCamelCase=32 ,_lowerCamelCase="quick_gelu" ,_lowerCamelCase=1E-5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1.0 ,**_lowerCamelCase ,) -> str:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = projection_dim
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = initializer_factor
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( PretrainedConfig ):
'''simple docstring'''
a : Optional[Any] = "altclip"
a : Union[str, Any] = True
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=768 ,_lowerCamelCase=2.6_5_9_2 ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = kwargs.pop('''text_config_dict''' ,_lowerCamelCase )
__lowercase = kwargs.pop('''vision_config_dict''' ,_lowerCamelCase )
super().__init__(**_lowerCamelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__lowercase = {}
# This is the complete result when using `text_config_dict`.
__lowercase = AltCLIPTextConfig(**_lowerCamelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__lowercase = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__lowercase = (
f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
f"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(_lowerCamelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
__lowercase = {}
# This is the complete result when using `vision_config_dict`.
__lowercase = AltCLIPVisionConfig(**_lowerCamelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__lowercase = {
str(_lowerCamelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__lowercase = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__lowercase = (
f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
f"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(_lowerCamelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
__lowercase = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
__lowercase = {}
logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
__lowercase = AltCLIPTextConfig(**_lowerCamelCase )
__lowercase = AltCLIPVisionConfig(**_lowerCamelCase )
__lowercase = projection_dim
__lowercase = logit_scale_init_value
__lowercase = 1.0
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 56
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
            sd.pop(key )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
            __lowercase = sd.pop(old_key )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            __lowercase , __lowercase , __lowercase = torch.split(value , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
__lowercase = load_checkpoint(lowerCamelCase_ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(lowerCamelCase_ ).half().eval()
model.load_state_dict(lowerCamelCase_ )
# Check results
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
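    # Divide and conquer: the best subarray is entirely left of mid, entirely right of it, or crosses it.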
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
    max_subarray(arr , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
    __lowercase = [time_max_subarray(input_size ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '''\t\t''' , runtime )
    plt.plot(input_sizes , runtimes )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
__lowercase = TransformeraDModel(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        __lowercase = VQDiffusionPipeline(
            vqvae=vqvae ,text_encoder=text_encoder ,tokenizer=tokenizer ,transformer=transformer ,scheduler=scheduler ,learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings ,)
        __lowercase = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        __lowercase = '''teddy bear playing in the pool'''
        __lowercase = torch.Generator(device=device ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=generator ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=device ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=generator ,output_type='''np''' ,return_dict=False ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
        __lowercase = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
        __lowercase = VQDiffusionPipeline(
            vqvae=vqvae ,text_encoder=text_encoder ,tokenizer=tokenizer ,transformer=transformer ,scheduler=scheduler ,learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings ,)
        __lowercase = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        __lowercase = '''teddy bear playing in the pool'''
        __lowercase = torch.Generator(device=device ).manual_seed(0 )
        __lowercase = pipe([prompt] ,generator=generator ,num_inference_steps=2 ,output_type='''np''' )
        __lowercase = output.images
        __lowercase = torch.Generator(device=device ).manual_seed(0 )
        __lowercase = pipe(
            [prompt] ,generator=generator ,output_type='''np''' ,return_dict=False ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        __lowercase = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
        __lowercase = torch.Generator(device=torch_device ).manual_seed(0 )
__lowercase = pipeline(
            '''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=generator ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 56
| 1
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
_SCREAMING_SNAKE_CASE = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __lowercase ( BertTokenizer ):
'''simple docstring'''
a : Any = VOCAB_FILES_NAMES
a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : str = VOCAB_FILES_NAMES
a : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
_SCREAMING_SNAKE_CASE = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
_SCREAMING_SNAKE_CASE = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passage titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passage texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output a batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet), truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(lowerCAmelCase__ )
class __lowercase :
'''simple docstring'''
def __call__(self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = False ,_lowerCamelCase = False ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=_lowerCamelCase ,return_tensors=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,**_lowerCamelCase ,)
elif titles is None or texts is None:
__lowercase = titles if texts is None else texts
return super().__call__(
_lowerCamelCase ,_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=_lowerCamelCase ,return_tensors=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = titles if not isinstance(_lowerCamelCase ,_lowerCamelCase ) else [titles]
__lowercase = texts if not isinstance(_lowerCamelCase ,_lowerCamelCase ) else [texts]
__lowercase = len(_lowerCamelCase )
__lowercase = questions if not isinstance(_lowerCamelCase ,_lowerCamelCase ) else [questions] * n_passages
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
f"There should be as many titles than texts but got {len(_lowerCamelCase )} titles and {len(_lowerCamelCase )} texts." )
__lowercase = super().__call__(_lowerCamelCase ,_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase )['''input_ids''']
__lowercase = super().__call__(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ,padding=_lowerCamelCase ,truncation=_lowerCamelCase )['''input_ids''']
__lowercase = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowerCamelCase ,_lowerCamelCase )
]
}
if return_attention_mask is not False:
__lowercase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowercase = attention_mask
return self.pad(_lowerCamelCase ,padding=_lowerCamelCase ,max_length=_lowerCamelCase ,return_tensors=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = 16 ,_lowerCamelCase = 64 ,_lowerCamelCase = 4 ,) -> List[DPRSpanPrediction]:
'''simple docstring'''
__lowercase = reader_input['''input_ids''']
__lowercase , __lowercase , __lowercase = reader_output[:3]
__lowercase = len(_lowerCamelCase )
__lowercase = sorted(range(_lowerCamelCase ) ,reverse=_lowerCamelCase ,key=relevance_logits.__getitem__ )
__lowercase = []
for doc_id in sorted_docs:
__lowercase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowercase = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowercase = sequence_ids.index(self.pad_token_id )
else:
__lowercase = len(_lowerCamelCase )
__lowercase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_lowerCamelCase ,top_spans=_lowerCamelCase ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_lowerCamelCase ,start_index=_lowerCamelCase ,end_index=_lowerCamelCase ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> List[DPRSpanPrediction]:
'''simple docstring'''
__lowercase = []
for start_index, start_score in enumerate(_lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowercase = sorted(_lowerCamelCase ,key=lambda x : x[1] ,reverse=_lowerCamelCase )
__lowercase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
__lowercase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCAmelCase__ )
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = VOCAB_FILES_NAMES
a : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
a : int = ["input_ids", "attention_mask"]
| 56
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['''token''']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
__lowercase = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
__lowercase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [files]
self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['''unk''']['''id''']
__lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
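# Minimal usage sketch for the trainer class above (the public name
# SentencePieceUnigramTokenizer from tokenizers.implementations is an
# assumption, since this file renames everything):
# tok = SentencePieceUnigramTokenizer()
# tok.train_from_iterator(['hello world', 'hello tokenizer'], vocab_size=60, show_progress=False)
# print(tok.encode('hello world').tokens)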
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 1
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_table.py
_SCREAMING_SNAKE_CASE = '''src/transformers'''
_SCREAMING_SNAKE_CASE = '''docs/source/en'''
_SCREAMING_SNAKE_CASE = '''.'''
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ):
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase = f.readlines()
# Find the start prompt.
__lowercase = 0
while not lines[start_index].startswith(lowerCamelCase_ ):
start_index += 1
start_index += 1
__lowercase = start_index
while not lines[end_index].startswith(lowerCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Suffixes that are used to identify models, separated by |
_SCREAMING_SNAKE_CASE = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_SCREAMING_SNAKE_CASE = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_SCREAMING_SNAKE_CASE = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_SCREAMING_SNAKE_CASE = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
__lowercase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCamelCase_ )
return [m.group(0 ) for m in matches]
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int ):
__lowercase = 2 if text == '''✅''' or text == '''❌''' else len(lowerCamelCase_ )
__lowercase = (width - text_length) // 2
__lowercase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _lowerCAmelCase ( ):
__lowercase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__lowercase = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__lowercase = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__lowercase = collections.defaultdict(lowerCamelCase_ )
__lowercase = collections.defaultdict(lowerCamelCase_ )
__lowercase = collections.defaultdict(lowerCamelCase_ )
__lowercase = collections.defaultdict(lowerCamelCase_ )
__lowercase = collections.defaultdict(lowerCamelCase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCamelCase_ ):
__lowercase = None
if attr_name.endswith('''Tokenizer''' ):
__lowercase = slow_tokenizers
__lowercase = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
__lowercase = fast_tokenizers
__lowercase = attr_name[:-1_3]
elif _re_tf_models.match(lowerCamelCase_ ) is not None:
__lowercase = tf_models
__lowercase = _re_tf_models.match(lowerCamelCase_ ).groups()[0]
elif _re_flax_models.match(lowerCamelCase_ ) is not None:
__lowercase = flax_models
__lowercase = _re_flax_models.match(lowerCamelCase_ ).groups()[0]
elif _re_pt_models.match(lowerCamelCase_ ) is not None:
__lowercase = pt_models
__lowercase = _re_pt_models.match(lowerCamelCase_ ).groups()[0]
if lookup_dict is not None:
while len(lowerCamelCase_ ) > 0:
if attr_name in model_name_to_prefix.values():
__lowercase = True
break
# Try again after removing the last word in the name
__lowercase = ''''''.join(camel_case_split(lowerCamelCase_ )[:-1] )
# Let's build that table!
__lowercase = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__lowercase = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__lowercase = [len(lowerCamelCase_ ) + 2 for c in columns]
__lowercase = max([len(lowerCamelCase_ ) for name in model_names] ) + 2
# Build the table per se
__lowercase = '''|''' + '''|'''.join([_center_text(lowerCamelCase_ , lowerCamelCase_ ) for c, w in zip(lowerCamelCase_ , lowerCamelCase_ )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
__lowercase = {True: '''✅''', False: '''❌'''}
for name in model_names:
__lowercase = model_name_to_prefix[name]
__lowercase = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCamelCase_ , lowerCamelCase_ ) for l, w in zip(lowerCamelCase_ , lowerCamelCase_ )] ) + "|\n"
return table
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any]=False ):
__lowercase , __lowercase , __lowercase , __lowercase = _find_text_in_file(
filename=os.path.join(lowerCamelCase_ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
__lowercase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCamelCase_ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 56
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
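# Illustrative example (original name handle_test_results assumed):
# handle_test_results('== 2 failed, 3 passed in 0:01:05 ==') -> (2, 3, '0:01:05')
# Each count is read from the token preceding 'failed'/'passed', and the
# trailing '==' marker shifts time_spent back by one token.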
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
__lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 56
| 1
|
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_SCREAMING_SNAKE_CASE = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = True ,) -> Optional[int]:
'''simple docstring'''
__lowercase = [file for file in os.listdir(_lowerCamelCase ) if os.path.isfile(os.path.join(_lowerCamelCase ,_lowerCamelCase ) )]
if identifier is not None:
__lowercase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
for n_ in n_identifier:
__lowercase = [file for file in files if n_ not in file]
else:
__lowercase = [file for file in files if n_identifier not in file]
__lowercase = ignore_files or []
ignore_files.append('''__init__.py''' )
__lowercase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' ,_lowerCamelCase )
if only_modules:
__lowercase = file.split('''.''' )[0]
try:
__lowercase = getattr(_lowerCamelCase ,_lowerCamelCase )
__lowercase = doctest.DocTestSuite(_lowerCamelCase )
__lowercase = unittest.TextTestRunner().run(_lowerCamelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(f"{module_identifier} is not a module." )
else:
__lowercase = doctest.testfile(str('''..''' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = '''modeling'''
__lowercase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(_lowerCamelCase ,identifier=_lowerCamelCase ,ignore_files=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = '''tokenization'''
self.analyze_directory(_lowerCamelCase ,identifier=_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = '''configuration'''
self.analyze_directory(_lowerCamelCase ,identifier=_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = Path('''src/transformers''' )
__lowercase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(_lowerCamelCase ,n_identifier=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = Path('''docs/source''' )
__lowercase = ['''favicon.ico''']
self.analyze_directory(_lowerCamelCase ,ignore_files=_lowerCamelCase ,only_modules=_lowerCamelCase )
| 56
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
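# Illustrative shell usage (assuming the console entry point is installed as
# `diffusers-cli`): running `diffusers-cli env` dispatches to EnvironmentCommand
# and prints the local environment report.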
| 56
| 1
|
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __lowercase ( pl.LightningModule ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
super().__init__()
__lowercase = model
__lowercase = 2
__lowercase = nn.Linear(self.model.config.hidden_size ,self.num_labels )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str ):
# load longformer model from model identifier
__lowercase = LongformerModel.from_pretrained(lowerCamelCase_ )
__lowercase = LightningModel(lowerCamelCase_ )
__lowercase = torch.load(lowerCamelCase_ , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
__lowercase = LongformerForQuestionAnswering.from_pretrained(lowerCamelCase_ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCamelCase_ )
print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
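# Illustrative invocation (script name and paths are placeholders):
# python convert_longformer_qa_checkpoint_to_pytorch.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#     --pytorch_dump_folder_path ./converted_model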
| 56
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
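# Illustrative examples (original name is_prime assumed):
# is_prime(29) -> True   (no odd divisor in range(3, 6, 2) divides 29)
# is_prime(28) -> False  (even numbers > 2 are rejected early)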
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
| 1
|
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_SCREAMING_SNAKE_CASE = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
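# Illustrative example (original names max_subarray/max_cross_sum assumed):
# max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) -> (3, 6, 6),
# i.e. the maximum-sum slice arr[3:7] == [4, -1, 2, 1] with sum 6.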
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[int] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Dict = PRETRAINED_INIT_CONFIGURATION
a : Optional[int] = ["input_ids", "attention_mask"]
a : Tuple = DistilBertTokenizer
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=True ,_lowerCamelCase="[UNK]" ,_lowerCamelCase="[SEP]" ,_lowerCamelCase="[PAD]" ,_lowerCamelCase="[CLS]" ,_lowerCamelCase="[MASK]" ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> str:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,do_lower_case=_lowerCamelCase ,unk_token=_lowerCamelCase ,sep_token=_lowerCamelCase ,pad_token=_lowerCamelCase ,cls_token=_lowerCamelCase ,mask_token=_lowerCamelCase ,tokenize_chinese_chars=_lowerCamelCase ,strip_accents=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,_lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,_lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,_lowerCamelCase ) != tokenize_chinese_chars
):
__lowercase = getattr(_lowerCamelCase ,normalizer_state.pop('''type''' ) )
__lowercase = do_lower_case
__lowercase = strip_accents
__lowercase = tokenize_chinese_chars
__lowercase = normalizer_class(**_lowerCamelCase )
__lowercase = do_lower_case
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> int:
'''simple docstring'''
__lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
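# Illustrative usage sketch (public transformers name DistilBertTokenizerFast
# assumed; the checkpoint id is for demonstration only):
# from transformers import DistilBertTokenizerFast
# tok = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
# print(tok('hello world')['input_ids'])  # [CLS] ... [SEP], built as above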
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {f'''funnel-transformer/{name}''': 5_1_2 for name in _model_names}
_SCREAMING_SNAKE_CASE = {f'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : str = PRETRAINED_INIT_CONFIGURATION
a : List[Any] = FunnelTokenizer
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : int = 2
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=True ,_lowerCamelCase="<unk>" ,_lowerCamelCase="<sep>" ,_lowerCamelCase="<pad>" ,_lowerCamelCase="<cls>" ,_lowerCamelCase="<mask>" ,_lowerCamelCase="<s>" ,_lowerCamelCase="</s>" ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=None ,_lowerCamelCase="##" ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,do_lower_case=_lowerCamelCase ,unk_token=_lowerCamelCase ,sep_token=_lowerCamelCase ,pad_token=_lowerCamelCase ,cls_token=_lowerCamelCase ,mask_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,clean_text=_lowerCamelCase ,tokenize_chinese_chars=_lowerCamelCase ,strip_accents=_lowerCamelCase ,wordpieces_prefix=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,_lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,_lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,_lowerCamelCase ) != tokenize_chinese_chars
):
__lowercase = getattr(_lowerCamelCase ,normalizer_state.pop('''type''' ) )
__lowercase = do_lower_case
__lowercase = strip_accents
__lowercase = tokenize_chinese_chars
__lowercase = normalizer_class(**_lowerCamelCase )
__lowercase = do_lower_case
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> int:
'''simple docstring'''
__lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
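# Funnel gives [CLS] its own token type id (cls_token_type_id); the first sequence gets 0 and the second gets 1.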
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# name of the decoder checkpoint on the hub (loaded via get_decoder)
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder from the hub and the local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = "git_vision_model"
def __init__(self ,_lowerCamelCase=768 ,_lowerCamelCase=3072 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3 ,_lowerCamelCase=224 ,_lowerCamelCase=16 ,_lowerCamelCase="quick_gelu" ,_lowerCamelCase=1E-5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0_2 ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = "git"
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=6 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=1024 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=0 ,_lowerCamelCase="absolute" ,_lowerCamelCase=True ,_lowerCamelCase=False ,_lowerCamelCase=101 ,_lowerCamelCase=102 ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> List[str]:
'''simple docstring'''
super().__init__(bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,pad_token_id=_lowerCamelCase ,**_lowerCamelCase )
if vision_config is None:
__lowercase = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
__lowercase = GitVisionConfig(**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = tie_word_embeddings
__lowercase = num_image_with_embedding
__lowercase = bos_token_id
__lowercase = eos_token_id
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
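# Serialize the full config, replacing the nested vision config object with its dict form.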
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 56
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
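# Scale pixel values by `scale`; `preprocess` passes scale=1/255 to map them into [0, 1].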
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 56
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
a : str = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : Dict = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = TextaTextGenerationPipeline(model=_lowerCamelCase ,tokenizer=_lowerCamelCase )
return generator, ["Something to write", "Something else"]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = generator('''Something there''' )
self.assertEqual(_lowerCamelCase ,[{'''generated_text''': ANY(_lowerCamelCase )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
__lowercase = generator(['''This is great !''', '''Something else'''] ,num_return_sequences=2 ,do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase ,[
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
] ,)
__lowercase = generator(
['''This is great !''', '''Something else'''] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase ,[
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
[{'''generated_text''': ANY(_lowerCamelCase )}, {'''generated_text''': ANY(_lowerCamelCase )}],
] ,)
with self.assertRaises(_lowerCamelCase ):
generator(4 )
@require_torch
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = pipeline('''text2text-generation''' ,model='''patrickvonplaten/t5-tiny-random''' ,framework='''pt''' )
# do_sample=False necessary for reproducibility
__lowercase = generator('''Something there''' ,do_sample=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,[{'''generated_text''': ''''''}] )
__lowercase = 3
__lowercase = generator(
'''Something there''' ,num_return_sequences=_lowerCamelCase ,num_beams=_lowerCamelCase ,)
__lowercase = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = generator('''This is a test''' ,do_sample=_lowerCamelCase ,num_return_sequences=2 ,return_tensors=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase ,[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] ,)
__lowercase = generator.model.config.eos_token_id
__lowercase = '''<pad>'''
__lowercase = generator(
['''This is a test''', '''This is a second test'''] ,do_sample=_lowerCamelCase ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_lowerCamelCase ,)
self.assertEqual(
_lowerCamelCase ,[
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] ,)
@require_tf
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = pipeline('''text2text-generation''' ,model='''patrickvonplaten/t5-tiny-random''' ,framework='''tf''' )
# do_sample=False necessary for reproducibility
__lowercase = generator('''Something there''' ,do_sample=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,[{'''generated_text''': ''''''}] )
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
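# HEURISTIC == 1 selects the Manhattan distance; any other value falls back to the Euclidean distance.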
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
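# Probe the four neighbouring cells, skipping out-of-bounds positions and obstacle cells (non-zero grid values).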
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
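# Expand the forward and backward searches in lockstep; a path is found once the nodes popped by the two searches share a position.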
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = bd_astar.search()
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = "SpeechT5FeatureExtractor"
a : Dict = "SpeechT5Tokenizer"
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Tuple:
'''simple docstring'''
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def __call__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = kwargs.pop('''audio''' ,_lowerCamelCase )
__lowercase = kwargs.pop('''text''' ,_lowerCamelCase )
__lowercase = kwargs.pop('''text_target''' ,_lowerCamelCase )
__lowercase = kwargs.pop('''audio_target''' ,_lowerCamelCase )
__lowercase = kwargs.pop('''sampling_rate''' ,_lowerCamelCase )
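# Only one main input may be set: raw audio goes through the feature extractor, text through the tokenizer; the *_target variants are processed the same way but attached as labels.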
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
__lowercase = self.feature_extractor(_lowerCamelCase ,*_lowerCamelCase ,sampling_rate=_lowerCamelCase ,**_lowerCamelCase )
elif text is not None:
__lowercase = self.tokenizer(_lowerCamelCase ,**_lowerCamelCase )
else:
__lowercase = None
if audio_target is not None:
__lowercase = self.feature_extractor(audio_target=_lowerCamelCase ,*_lowerCamelCase ,sampling_rate=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = targets['''input_values''']
elif text_target is not None:
__lowercase = self.tokenizer(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = targets['''input_ids''']
else:
__lowercase = None
if inputs is None:
return targets
if targets is not None:
__lowercase = labels
__lowercase = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__lowercase = decoder_attention_mask
return inputs
def _UpperCAmelCase (self ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = kwargs.pop('''input_values''' ,_lowerCamelCase )
__lowercase = kwargs.pop('''input_ids''' ,_lowerCamelCase )
__lowercase = kwargs.pop('''labels''' ,_lowerCamelCase )
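# Mirrors __call__: pad inputs with the matching component and attach padded targets as labels (plus an optional decoder_attention_mask).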
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
__lowercase = self.feature_extractor.pad(_lowerCamelCase ,*_lowerCamelCase ,**_lowerCamelCase )
elif input_ids is not None:
__lowercase = self.tokenizer.pad(_lowerCamelCase ,**_lowerCamelCase )
else:
__lowercase = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowerCamelCase ,_lowerCamelCase ) and "input_ids" in labels[0]):
__lowercase = self.tokenizer.pad(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = targets['''input_ids''']
else:
__lowercase = self.feature_extractor.feature_size
__lowercase = self.feature_extractor.num_mel_bins
__lowercase = self.feature_extractor.pad(_lowerCamelCase ,*_lowerCamelCase ,**_lowerCamelCase )
__lowercase = feature_size_hack
__lowercase = targets['''input_values''']
else:
__lowercase = None
if inputs is None:
return targets
if targets is not None:
__lowercase = labels
__lowercase = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__lowercase = decoder_attention_mask
return inputs
def _UpperCAmelCase (self ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*_lowerCamelCase ,**_lowerCamelCase )
| 56
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
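# Copy one TDNN layer per configured kernel size from the s3prl checkpoint.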
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
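# Dispatch on the architecture name to pick the matching weight-conversion routine.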
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = int(lowerCamelCase_ )
if n_element < 1:
__lowercase = ValueError('''a should be a positive number''' )
raise my_error
__lowercase = [1]
__lowercase , __lowercase , __lowercase = (0, 0, 0)
__lowercase = 1
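# Three-pointer technique: i, j and k track the next candidates to multiply by 2, 3 and 5; each new Hamming number is the smallest such product not yet in the list.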
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
_SCREAMING_SNAKE_CASE = hamming(int(n))
print('''-----------------------------------------------------''')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('''-----------------------------------------------------''')
| 56
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
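# Rewrite the file line by line: drop TF-specific imports, apply the regex table above, and flag lines that need manual attention.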
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__lowercase = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__lowercase = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__lowercase = max(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
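# Zero-pad both binary strings to the same length and AND them digit by digit.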
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_ ) , b_binary.zfill(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
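# Inner helper that builds one dataset; line-by-line mode can pair each file with a whole-word-mask reference file (Chinese WWM).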
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
| 1
|
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str ):
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase_ , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase_ , filename='''pytorch_model.bin''' ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
__lowercase = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used; remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase_ , config=lowerCamelCase_ , state_dict=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path to the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
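# Example invocation (hypothetical script and output path; the repo id comes
# from the help text above):
# python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta_prelayernorm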
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
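# Note: _LazyModule defers importing the torch-backed submodules listed above
# until an attribute is first accessed, keeping the package import lightweight.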
| 56
| 1
|
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = int(number**0.5 )
return number == sq * sq
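# Hedged aside: `int(number**0.5)` can be off by one for very large integers;
# an exact variant (not part of the original file) would use math.isqrt:
# def is_sq_exact(number: int) -> bool:
#     from math import isqrt
#     return number == isqrt(number) ** 2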
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__lowercase = x_den * y_den * z_den
__lowercase = gcd(lowerCamelCase_ , lowerCamelCase_ )
top //= hcf
bottom //= hcf
return top, bottom
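# The helper above sums three fractions over a common denominator and reduces
# the result to lowest terms with gcd before returning (numerator, denominator).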
def _lowerCAmelCase ( lowerCamelCase_ : int = 3_5 ):
__lowercase = set()
__lowercase = 42
__lowercase = Fraction(0 )
__lowercase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
__lowercase = x_num * y_den + x_den * y_num
__lowercase = x_den * y_den
__lowercase = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=2
__lowercase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__lowercase = x_den * x_den * y_den * y_den
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
__lowercase = int(sqrt(lowerCamelCase_ ) )
__lowercase = int(sqrt(lowerCamelCase_ ) )
__lowercase = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-1
__lowercase = x_num * y_num
__lowercase = x_den * y_num + x_num * y_den
__lowercase = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-2
__lowercase = x_num * x_num * y_num * y_num
__lowercase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
__lowercase = int(sqrt(lowerCamelCase_ ) )
__lowercase = int(sqrt(lowerCamelCase_ ) )
__lowercase = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
for num, den in unique_s:
total += Fraction(lowerCamelCase_ , lowerCamelCase_ )
return total.denominator + total.numerator
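# The search enumerates fractions x, y with numerator < denominator and solves
# x^n + y^n = z^n for z in the four cases n = 1, 2, -1, -2, accumulating every
# distinct in-range z before returning numerator + denominator of the total.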
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = "convbert"
def __init__(self ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=1 ,_lowerCamelCase=0 ,_lowerCamelCase=2 ,_lowerCamelCase=768 ,_lowerCamelCase=2 ,_lowerCamelCase=9 ,_lowerCamelCase=1 ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = embedding_size
__lowercase = head_ratio
__lowercase = conv_kernel_size
__lowercase = num_groups
__lowercase = classifier_dropout
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 56
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64, 64 is the height & width of the dataset images and 3 is the number of RGB channels
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
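# Note: `fit_generator` is deprecated in recent TensorFlow/Keras releases;
# `classifier.fit(training_set, ...)` accepts generators directly nowadays.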
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = '''Normal'''
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = '''Abnormality detected'''
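# Hedged note: `predict` returns sigmoid probabilities, so the equality checks
# above only fire on saturated outputs; `result[0][0] > 0.5` (an assumption,
# not in the original script) would be the more robust decision rule.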
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
_SCREAMING_SNAKE_CASE = list[tuple[int, int]]
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = abs(self.pos_x - self.goal_x )
__lowercase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> Path | None:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowercase = True
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
if not self.reached:
return [self.start.pos]
return None
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Path:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
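# Note: unlike A*, this greedy best-first search orders the open list purely by
# the Manhattan heuristic (f_cost ignores g_cost), so the returned path is not
# guaranteed to be the shortest one.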
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
_SCREAMING_SNAKE_CASE = GreedyBestFirst(init, goal)
_SCREAMING_SNAKE_CASE = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_SCREAMING_SNAKE_CASE = 2
for elem in grid:
print(elem)
| 56
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 1
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ):
__lowercase = HfArgumentParser(lowerCamelCase_ )
__lowercase = parser.parse_args_into_dataclasses()[0]
__lowercase = TensorFlowBenchmark(args=lowerCamelCase_ )
try:
__lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__lowercase = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
__lowercase = ''' '''.join(str(lowerCamelCase_ ).split(''' ''' )[:-1] )
__lowercase = ''''''
__lowercase = eval(str(lowerCamelCase_ ).split(''' ''' )[-1] )
__lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
__lowercase = full_error_msg + begin_error_msg + str(lowerCamelCase_ )
raise ValueError(lowerCamelCase_ )
benchmark.run()
if __name__ == "__main__":
main()
| 56
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
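# Note: the conversation encoder above appends the eos token after every turn
# and, once the sequence exceeds model_max_length, keeps only the newest tokens.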
| 56
| 1
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[str] = "owlvit_text_model"
def __init__(self ,_lowerCamelCase=49408 ,_lowerCamelCase=512 ,_lowerCamelCase=2048 ,_lowerCamelCase=12 ,_lowerCamelCase=8 ,_lowerCamelCase=16 ,_lowerCamelCase="quick_gelu" ,_lowerCamelCase=1E-5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1.0 ,_lowerCamelCase=0 ,_lowerCamelCase=49406 ,_lowerCamelCase=49407 ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = max_position_embeddings
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = "owlvit_vision_model"
def __init__(self ,_lowerCamelCase=768 ,_lowerCamelCase=3072 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3 ,_lowerCamelCase=768 ,_lowerCamelCase=32 ,_lowerCamelCase="quick_gelu" ,_lowerCamelCase=1E-5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1.0 ,**_lowerCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = image_size
__lowercase = patch_size
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = "owlvit"
a : str = True
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=512 ,_lowerCamelCase=2.6_5_9_2 ,_lowerCamelCase=True ,**_lowerCamelCase ,) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if text_config is None:
__lowercase = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__lowercase = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
__lowercase = OwlViTTextConfig(**_lowerCamelCase )
__lowercase = OwlViTVisionConfig(**_lowerCamelCase )
__lowercase = projection_dim
__lowercase = logit_scale_init_value
__lowercase = return_dict
__lowercase = 1.0
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,**_lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCamelCase )
__lowercase , __lowercase = cls.get_config_dict(_lowerCamelCase ,**_lowerCamelCase )
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
@classmethod
def _UpperCAmelCase (cls ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = {}
__lowercase = text_config
__lowercase = vision_config
return cls.from_dict(_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
return 1E-4
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = -1 ,_lowerCamelCase = -1 ,_lowerCamelCase = None ,) -> Mapping[str, Any]:
'''simple docstring'''
__lowercase = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_lowerCamelCase ,seq_length=_lowerCamelCase ,framework=_lowerCamelCase )
__lowercase = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_lowerCamelCase ,framework=_lowerCamelCase )
return {**text_input_dict, **image_input_dict}
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return 14
| 56
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ):
return float((preds == labels).mean() )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
__lowercase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__lowercase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 1
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : str = AutoencoderKL
a : Dict = "sample"
a : List[str] = 1e-2
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = 4
__lowercase = 3
__lowercase = (32, 32)
__lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
return {"sample": image}
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
return (3, 32, 32)
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' ,'''Gradient checkpointing skipped on MPS''' )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.model_class(**_lowerCamelCase )
model.to(_lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
__lowercase = model(**_lowerCamelCase ).sample
# run the backwards pass on the model. For simplicity, we don't compute a real
# loss here; we just backprop on (out - labels).mean()
model.zero_grad()
__lowercase = torch.randn_like(_lowerCamelCase )
__lowercase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__lowercase = self.model_class(**_lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__lowercase = model_a(**_lowerCamelCase ).sample
# run the backwards pass on the model. For simplicity, we don't compute a real
# loss here; we just backprop on (out_a - labels).mean()
model_a.zero_grad()
__lowercase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__lowercase = dict(model.named_parameters() )
__lowercase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5E-5 ) )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase , __lowercase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ,output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(_lowerCamelCase )
__lowercase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
__lowercase = model.to(_lowerCamelCase )
model.eval()
if torch_device == "mps":
__lowercase = torch.manual_seed(0 )
else:
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
__lowercase = image.to(_lowerCamelCase )
with torch.no_grad():
__lowercase = model(_lowerCamelCase ,sample_posterior=_lowerCamelCase ,generator=_lowerCamelCase ).sample
__lowercase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__lowercase = torch.tensor(
[
-4.0_0_7_8E-0_1,
-3.8_3_2_3E-0_4,
-1.2_6_8_1E-0_1,
-1.1_4_6_2E-0_1,
2.0_0_9_5E-0_1,
1.0_8_9_3E-0_1,
-8.8_2_4_7E-0_2,
-3.0_3_6_1E-0_1,
-9.8_6_4_4E-0_3,
] )
elif torch_device == "cpu":
__lowercase = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__lowercase = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(_lowerCamelCase ,_lowerCamelCase ,rtol=1E-2 ) )
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
return f"gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCamelCase ) for s in shape] )}.npy"
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ,_lowerCamelCase=0 ,_lowerCamelCase=(4, 3, 512, 512) ,_lowerCamelCase=False ) -> str:
'''simple docstring'''
__lowercase = torch.floataa if fpaa else torch.floataa
__lowercase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCamelCase ,_lowerCamelCase ) ) ).to(_lowerCamelCase ).to(_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase="CompVis/stable-diffusion-v1-4" ,_lowerCamelCase=False ) -> List[str]:
'''simple docstring'''
__lowercase = '''fp16''' if fpaa else None
__lowercase = torch.floataa if fpaa else torch.floataa
__lowercase = AutoencoderKL.from_pretrained(
_lowerCamelCase ,subfolder='''vae''' ,torch_dtype=_lowerCamelCase ,revision=_lowerCamelCase ,)
model.to(_lowerCamelCase ).eval()
return model
def _UpperCAmelCase (self ,_lowerCamelCase=0 ) -> Optional[Any]:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(_lowerCamelCase )
return torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_sd_vae_model()
__lowercase = self.get_sd_image(_lowerCamelCase )
__lowercase = self.get_generator(_lowerCamelCase )
with torch.no_grad():
__lowercase = model(_lowerCamelCase ,generator=_lowerCamelCase ,sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
__lowercase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowercase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_sd_vae_model(fpaa=_lowerCamelCase )
__lowercase = self.get_sd_image(_lowerCamelCase ,fpaa=_lowerCamelCase )
__lowercase = self.get_generator(_lowerCamelCase )
with torch.no_grad():
__lowercase = model(_lowerCamelCase ,generator=_lowerCamelCase ,sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
__lowercase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowercase = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.get_sd_vae_model()
__lowercase = self.get_sd_image(_lowerCamelCase )
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).sample
assert sample.shape == image.shape
__lowercase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowercase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = self.get_sd_vae_model()
__lowercase = self.get_sd_image(_lowerCamelCase ,shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowercase = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__lowercase = sample[-1, -2:, :2, -2:].flatten().cpu()
__lowercase = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = self.get_sd_vae_model(fpaa=_lowerCamelCase )
__lowercase = self.get_sd_image(_lowerCamelCase ,shape=(3, 4, 64, 64) ,fpaa=_lowerCamelCase )
with torch.no_grad():
__lowercase = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__lowercase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowercase = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = self.get_sd_vae_model(fpaa=_lowerCamelCase )
__lowercase = self.get_sd_image(_lowerCamelCase ,shape=(3, 4, 64, 64) ,fpaa=_lowerCamelCase )
with torch.no_grad():
__lowercase = model.decode(_lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowercase = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.get_sd_vae_model()
__lowercase = self.get_sd_image(_lowerCamelCase ,shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowercase = model.decode(_lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowercase = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_sd_vae_model()
__lowercase = self.get_sd_image(_lowerCamelCase )
__lowercase = self.get_generator(_lowerCamelCase )
with torch.no_grad():
__lowercase = model.encode(_lowerCamelCase ).latent_dist
__lowercase = dist.sample(generator=_lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__lowercase = sample[0, -1, -3:, -3:].flatten().cpu()
__lowercase = torch.tensor(_lowerCamelCase )
__lowercase = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(_lowerCamelCase ,_lowerCamelCase ,atol=_lowerCamelCase )
| 56
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase_ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(lowerCamelCase_ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
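# Shape sketch for the split above (h = hidden size, an assumed symbol):
# qkv_proj.weight is (3*h, h); torch.split(value, depth // 3, dim=0) yields
# three (h, h) blocks, assigned in the K, V, Q order noted in the comment.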
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
__lowercase = load_checkpoint(lowerCamelCase_ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(lowerCamelCase_ ).half().eval()
model.load_state_dict(lowerCamelCase_ )
# Check results
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
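# Example invocation (hypothetical script name and paths):
# python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#     --fairseq_path ./opt_checkpoint.pt --pytorch_dump_folder_path ./opt-hf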
| 56
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[int] | None = None ):
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
__lowercase = nums[0]
for i in range(1 , len(lowerCamelCase_ ) ):
__lowercase = nums[i]
__lowercase = max(lowerCamelCase_ , ans + num , lowerCamelCase_ )
return ans
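# Quick sanity checks for the single-pass scan above (Kadane's algorithm),
# using the name from the __main__ block below:
#   max_subsequence_sum([8, -1, 3, 4])   # -> 15, the whole array
#   max_subsequence_sum([2, 8, 9])       # -> 19
#   max_subsequence_sum([-2, -3, -1])    # -> -1, best single element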
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_SCREAMING_SNAKE_CASE = int(input('''Enter number of elements : ''').strip())
_SCREAMING_SNAKE_CASE = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 56
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
__lowercase = TransformeraDModel(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipeline(
'''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : list , lowerCamelCase_ : int , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 0 ):
__lowercase = right or len(lowerCamelCase_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowerCamelCase_ , lowerCamelCase_ , left + 1 , right - 1 )
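# Illustrative trace of the two-end recursion above, using the `search` name
# already referenced in the recursive call:
#   search([1, 5, 9, 13], 9)   # left=0/right=3 miss, then left=1/right=2 hits -> 2
#   search([1, 5, 9, 13], 7)   # recurses inward until left > right -> -1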
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['''token''']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
__lowercase = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
__lowercase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [files]
self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['''unk''']['''id''']
__lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
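# Hypothetical usage sketch for the Unigram tokenizer defined above
# (`SentencePieceUnigramTokenizer` is an assumed class name, not shown here):
#   tok = SentencePieceUnigramTokenizer()
#   tok.train_from_iterator(["hello world", "hello there"], vocab_size=100)
#   tok._tokenizer.encode("hello world").ids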
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_SCREAMING_SNAKE_CASE = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_SCREAMING_SNAKE_CASE = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _lowerCAmelCase ( lowerCamelCase_ : list[list[int]] ):
__lowercase = []
for i in range(len(lowerCamelCase_ ) ):
__lowercase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__lowercase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__lowercase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase_ )
return next_generation
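# Sanity check against the rules above: a vertical blinker becomes a
# horizontal one after a single generation.
#   new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
#   # -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]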
def _lowerCAmelCase ( lowerCamelCase_ : list[list[int]] , lowerCamelCase_ : int ):
__lowercase = []
for _ in range(lowerCamelCase_ ):
# Create output image
__lowercase = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase_ )) )
__lowercase = img.load()
# Save cells to image
for x in range(len(lowerCamelCase_ ) ):
for y in range(len(cells[0] ) ):
__lowercase = 2_5_5 - cells[y][x] * 2_5_5
__lowercase = (colour, colour, colour)
# Save image
images.append(lowerCamelCase_ )
__lowercase = new_generation(lowerCamelCase_ )
return images
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = generate_images(GLIDER, 1_6)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE = {
'''unc-nlp/lxmert-base-uncased''': 5_1_2,
}
_SCREAMING_SNAKE_CASE = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[Any] = VOCAB_FILES_NAMES
a : List[str] = PRETRAINED_VOCAB_FILES_MAP
a : Any = PRETRAINED_INIT_CONFIGURATION
a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] = LxmertTokenizer
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=True ,_lowerCamelCase="[UNK]" ,_lowerCamelCase="[SEP]" ,_lowerCamelCase="[PAD]" ,_lowerCamelCase="[CLS]" ,_lowerCamelCase="[MASK]" ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> Dict:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,do_lower_case=_lowerCamelCase ,unk_token=_lowerCamelCase ,sep_token=_lowerCamelCase ,pad_token=_lowerCamelCase ,cls_token=_lowerCamelCase ,mask_token=_lowerCamelCase ,tokenize_chinese_chars=_lowerCamelCase ,strip_accents=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,_lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,_lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,_lowerCamelCase ) != tokenize_chinese_chars
):
__lowercase = getattr(_lowerCamelCase ,normalizer_state.pop('''type''' ) )
__lowercase = do_lower_case
__lowercase = strip_accents
__lowercase = tokenize_chinese_chars
__lowercase = normalizer_class(**_lowerCamelCase )
__lowercase = do_lower_case
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
| 56
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
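# Example of the parsing above on a made-up pytest summary string: the token
# before "failed"/"passed" is the count, and the last token (second-to-last
# when the line is wrapped in '=') is the duration.
#   handle_test_results("4 failed, 96 passed in 1:02:03")
#   # -> (4, 96, "1:02:03")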
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
        __lowercase = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 56
| 1
|
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_SCREAMING_SNAKE_CASE = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 56
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 56
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : str = "xlm"
a : Tuple = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__(self ,_lowerCamelCase=30145 ,_lowerCamelCase=2048 ,_lowerCamelCase=12 ,_lowerCamelCase=16 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=True ,_lowerCamelCase=False ,_lowerCamelCase=False ,_lowerCamelCase=False ,_lowerCamelCase=1 ,_lowerCamelCase=True ,_lowerCamelCase=512 ,_lowerCamelCase=2048**-0.5 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=0 ,_lowerCamelCase=1 ,_lowerCamelCase=2 ,_lowerCamelCase=3 ,_lowerCamelCase=5 ,_lowerCamelCase=True ,_lowerCamelCase="first" ,_lowerCamelCase=True ,_lowerCamelCase=None ,_lowerCamelCase=True ,_lowerCamelCase=0.1 ,_lowerCamelCase=5 ,_lowerCamelCase=5 ,_lowerCamelCase=0 ,_lowerCamelCase=0 ,_lowerCamelCase=2 ,_lowerCamelCase=0 ,**_lowerCamelCase ,) -> List[str]:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = emb_dim
__lowercase = n_layers
__lowercase = n_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = gelu_activation
__lowercase = sinusoidal_embeddings
__lowercase = causal
__lowercase = asm
__lowercase = n_langs
__lowercase = use_lang_emb
__lowercase = layer_norm_eps
__lowercase = bos_index
__lowercase = eos_index
__lowercase = pad_index
__lowercase = unk_index
__lowercase = mask_index
__lowercase = is_encoder
__lowercase = max_position_embeddings
__lowercase = embed_init_std
__lowercase = init_std
__lowercase = summary_type
__lowercase = summary_use_proj
__lowercase = summary_activation
__lowercase = summary_proj_to_labels
__lowercase = summary_first_dropout
__lowercase = start_n_top
__lowercase = end_n_top
__lowercase = mask_token_id
__lowercase = lang_id
if "n_words" in kwargs:
__lowercase = kwargs['''n_words''']
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,**_lowerCamelCase )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 56
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
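# Trial division above only tests odd divisors up to sqrt(number), e.g.:
#   is_prime(29)   # -> True  (checks 3 and 5 only)
#   is_prime(87)   # -> False (3 divides 87)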
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
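# Illustrative calls (the start point is factor * value, and the walk moves
# upward unless desc=True is passed):
#   next_prime(14)     # 14 -> 15 -> 16 -> 17, returns 17
#   next_prime(4, 3)   # starts at 3 * 4 = 12, returns 13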
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''ViTFeatureExtractor''']
_SCREAMING_SNAKE_CASE = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
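# Classic check for the divide-and-conquer routine above (0-indexed bounds):
#   max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
#   # -> (3, 6, 6): the maximum subarray is [4, -1, 2, 1]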
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = "timesformer"
def __init__(self ,_lowerCamelCase=224 ,_lowerCamelCase=16 ,_lowerCamelCase=3 ,_lowerCamelCase=8 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-6 ,_lowerCamelCase=True ,_lowerCamelCase="divided_space_time" ,_lowerCamelCase=0 ,**_lowerCamelCase ,) -> List[str]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = num_frames
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = qkv_bias
__lowercase = attention_type
__lowercase = drop_path_rate
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
a : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
a : Optional[str] = field(default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
a : Optional[int] = field(
default=5 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
a : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
)
} , )
a : Optional[int] = field(
default=lowerCAmelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : bool = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
if self.train_file is not None:
__lowercase = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__lowercase = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any ):
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
__lowercase = [json.loads(lowerCamelCase_ ) for line in f.read().splitlines() if (len(lowerCamelCase_ ) > 0 and not line.isspace())]
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
__lowercase = {c: dataset[c] for c in dataset.column_names}
__lowercase = refs
return Dataset.from_dict(lowerCamelCase_ )
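# Illustrative ref-file contents (an assumption inferred from the parsing
# above): one JSON list per line, aligned with the dataset rows, e.g. the
# sub-token indices that continue a Chinese whole word.
#   # refs.txt
#   [2, 3]
#   [1]
#   add_chinese_references(tokenized_datasets["train"], "refs.txt")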
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowercase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"train[:{data_args.validation_split_percentage}%]" , )
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"train[{data_args.validation_split_percentage}%:]" , )
else:
__lowercase = {}
if data_args.train_file is not None:
__lowercase = data_args.train_file
if data_args.validation_file is not None:
__lowercase = data_args.validation_file
__lowercase = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
__lowercase = '''text'''
__lowercase = load_dataset(lowerCamelCase_ , data_files=lowerCamelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , **lowerCamelCase_ )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase_ )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
__lowercase = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCamelCase_ )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCamelCase_ )
else:
raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported by this script. '''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
__lowercase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelForMaskedLM.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowercase = datasets['''train'''].column_names
else:
__lowercase = datasets['''validation'''].column_names
__lowercase = '''text''' if '''text''' in column_names else column_names[0]
__lowercase = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(lowerCamelCase_ : int ):
# Remove empty lines
__lowercase = [line for line in examples['''text'''] if len(lowerCamelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=data_args.max_seq_length )
__lowercase = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__lowercase = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowercase = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing their columns
__lowercase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowercase = False
# Data collator
# This one will take care of randomly masking the tokens.
__lowercase = DataCollatorForWholeWordMask(tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowercase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowercase = model_args.model_name_or_path
else:
__lowercase = None
__lowercase = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowercase = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = perplexity
__lowercase = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
return results
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
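# The helpers below rebuild the tokenizer and feature extractor from the temp dir written above and fetch the beam-search decoder from the Hub.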
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure the overridden decoder parameters are picked up when reloading from disk
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
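# Deterministic dummy logits: a seeded (batch, sequence, vocab) array so decoder comparisons stay reproducible across runs.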
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline
a : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
a : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
a : List[Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"num_images_per_prompt",
"output_type",
]
a : Optional[int] = False
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return 100
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = {
'''in_channels''': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__lowercase = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
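# A deliberately tiny VQModel ("movq") configuration keeps the test fast while still exercising the spatial-norm decoder path.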
@property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.dummy_unet
__lowercase = self.dummy_movq
__lowercase = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__lowercase = DDIMScheduler(**_lowerCamelCase )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=0 ) -> Any:
'''simple docstring'''
__lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
__lowercase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__lowercase = image.cpu().permute(0 ,2 ,3 ,1 )[0]
__lowercase = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
__lowercase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
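# mps backends may not support device-bound generators, so the test seeds the global default generator there instead.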
if str(_lowerCamelCase ).startswith('''mps''' ):
__lowercase = torch.manual_seed(_lowerCamelCase )
else:
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
__lowercase = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_lowerCamelCase )
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
__lowercase = output.images
__lowercase = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) ,return_dict=_lowerCamelCase ,)[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
__lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__lowercase = init_image.resize((512, 512) )
__lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__lowercase = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
__lowercase = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
__lowercase = '''A robot, 4k photo'''
__lowercase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' ,torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
__lowercase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' ,torch_dtype=torch.floataa )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase , __lowercase = pipe_prior(
_lowerCamelCase ,image=_lowerCamelCase ,strength=0.8_5 ,generator=_lowerCamelCase ,negative_prompt='''''' ,).to_tuple()
__lowercase = pipeline(
image=_lowerCamelCase ,image_embeds=_lowerCamelCase ,negative_image_embeds=_lowerCamelCase ,hint=_lowerCamelCase ,generator=_lowerCamelCase ,num_inference_steps=100 ,height=512 ,width=512 ,strength=0.5 ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase ,_lowerCamelCase )
| 56
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
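# rescale simply multiplies pixel values by the given scale; preprocess passes 1 / 255 so uint8 images land in [0, 1].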
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
| 56
| 1
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
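# Pick the collator that matches the objective: permutation LM for XLNet, whole-word masking when both --mlm and --whole_word_mask are set, plain (masked) LM otherwise.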
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
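# The open list is kept sorted through this comparison, so the node with the lowest f = g + h is always expanded first.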
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
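# Run a forward and a backward A* toward each other; each search re-targets the other's current node, and the two half-paths are stitched together where the frontiers meet.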
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = bidir_astar.search()
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_SCREAMING_SNAKE_CASE = (3, 9, -1_1, 0, 7, 5, 1, -1)
_SCREAMING_SNAKE_CASE = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class __lowercase :
'''simple docstring'''
a : int
a : Node | None
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = None
for i in sorted(_lowerCamelCase ,reverse=_lowerCamelCase ):
__lowercase = Node(_lowerCamelCase ,self.head )
def __iter__(self ) -> Iterator[int]:
'''simple docstring'''
__lowercase = self.head
while node:
yield node.data
__lowercase = node.next_node
def __len__(self ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__(self ) -> str:
'''simple docstring'''
return " -> ".join([str(_lowerCamelCase ) for node in self] )
def _lowerCAmelCase ( lowerCamelCase_ : SortedLinkedList , lowerCamelCase_ : SortedLinkedList ):
return SortedLinkedList(list(lowerCamelCase_ ) + list(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 56
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
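# Each converter above copies the downstream (S3PRL) head weights into the matching attributes of the corresponding HF model; the entry point below dispatches on the architecture name found in the config.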
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 56
| 1
|
'''simple docstring'''
from math import sqrt
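# Project Euler 86: for a cuboid with sides a <= b <= c, the shortest surface path between opposite corners is sqrt((a + b)**2 + c**2); below, sum_shortest_sides plays a + b and max_cuboid_size plays c.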
def _lowerCAmelCase ( lowerCamelCase_ : int = 1_0_0_0_0_0_0 ):
__lowercase = 0
__lowercase = 0
__lowercase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowerCamelCase_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
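# Each (pattern, replacement) pair is applied in order with re.sub to every line of the TFDS script; ordering matters because the broad tfds. -> datasets. rule would otherwise shadow the more specific ones.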
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 56
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
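# Illustrative note (ours): each converter in this script copies tensors from the S3PRL
# downstream state dict onto the matching HF submodule; the obfuscated assignments above
# stand in for statements of the form
#     model.projector.weight.data = downstream_dict["projector.weight"]
# A generic, hypothetical helper doing the same copy by attribute path:
def _copy_param(model, attr_path, tensor):
    # walk a dotted path like "projector.weight" and overwrite the leaf tensor in place
    module = model
    *parents, leaf = attr_path.split(".")
    for name in parents:
        module = getattr(module, name)
    getattr(module, leaf).data = tensor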
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
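# Example invocation (illustrative; the script filename and local paths are hypothetical,
# the flags are exactly the ones registered above):
#   python convert_unispeech_sat_s3prl.py --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json --checkpoint_path ./downstream.ckpt --model_dump_path ./converted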
| 56
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether or not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overwrite it.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
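# Worked example (illustrative): the perplexity reported above is exp(eval cross-entropy
# loss), so an eval_loss of 3.0 corresponds to a perplexity of exp(3.0) ~= 20.09.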
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 56
| 1
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] ):
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
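# Illustrative behaviour of the helper above (referenced later as `to_atuple`): iterables
# pass through unchanged and scalars are duplicated into a pair, e.g. (224, 224) -> (224, 224)
# and 224 -> (224, 224).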
@require_flax
class __lowercase :
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCamelCase ,_lowerCamelCase ,f"Difference between torch and flax is {diff} (>= {tol})." )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase ,_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.get_vision_text_model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.get_vision_text_model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase )
__lowercase = model(input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = after_output[0]
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCamelCase ,1E-3 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase = self.get_vision_text_model(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase )
__lowercase = model(
input_ids=_lowerCamelCase ,pixel_values=_lowerCamelCase ,attention_mask=_lowerCamelCase ,output_attentions=_lowerCamelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCamelCase ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCamelCase ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
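# Worked example for the seq_len arithmetic above (illustrative): a 224x224 image with
# 16x16 patches yields (224 // 16) * (224 // 16) = 196 patches, hence a sequence length
# of 196 + 1 = 197 once the [CLS] token is counted.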
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
pt_model.to(_lowerCamelCase )
pt_model.eval()
# prepare inputs
__lowercase = inputs_dict
__lowercase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase = pt_model(**_lowerCamelCase ).to_tuple()
__lowercase = fx_model(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) ,len(_lowerCamelCase ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(_lowerCamelCase ,pt_output.numpy() ,4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase ,from_pt=_lowerCamelCase )
__lowercase = fx_model_loaded(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) ,len(_lowerCamelCase ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(_lowerCamelCase ,pt_output.numpy() ,4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowerCamelCase )
__lowercase = VisionTextDualEncoderModel.from_pretrained(_lowerCamelCase ,from_flax=_lowerCamelCase )
pt_model_loaded.to(_lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase = pt_model_loaded(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) ,len(_lowerCamelCase ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
self.assert_almost_equals(_lowerCamelCase ,pt_output_loaded.numpy() ,4E-2 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase ,_lowerCamelCase )
__lowercase = VisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,_lowerCamelCase )
__lowercase = fx_state
self.check_pt_flax_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase ,_lowerCamelCase )
__lowercase = VisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel(_lowerCamelCase )
__lowercase = load_flax_weights_in_pytorch_model(_lowerCamelCase ,fx_model.params )
self.check_pt_flax_equivalence(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCamelCase )
@is_pt_flax_cross_test
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase = config_inputs_dict.pop('''vision_config''' )
__lowercase = config_inputs_dict.pop('''text_config''' )
__lowercase = config_inputs_dict
self.check_equivalence_pt_to_flax(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
self.check_equivalence_flax_to_pt(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCamelCase )
__lowercase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCamelCase )
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase )
__lowercase = model_a(**_lowerCamelCase )
__lowercase = after_outputs[0]
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCamelCase ,1E-5 )
@require_flax
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=_lowerCamelCase ,text_from_pt=_lowerCamelCase ,)
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = FlaxViTModel(_lowerCamelCase )
__lowercase = FlaxBertModel(_lowerCamelCase )
return vision_model, text_model
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = FlaxViTModelTester(self )
__lowercase = FlaxBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
# return the configs and inputs for the vision and text models
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=_lowerCamelCase ,text_from_pt=_lowerCamelCase ,)
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = FlaxCLIPVisionModel(_lowerCamelCase )
__lowercase = FlaxBertModel(_lowerCamelCase )
return vision_model, text_model
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = FlaxCLIPVisionModelTester(self )
__lowercase = FlaxBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
# return the configs and inputs for the vision and text models
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__lowercase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors='''np''' )
__lowercase = model(**_lowerCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
__lowercase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image ,_lowerCamelCase ,atol=1E-3 ) )
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
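# Illustrative note (our reading of the obfuscated assignment above): the transformers
# lazy-import convention binds the _LazyModule to sys.modules[__name__], so the VAN
# submodules are only imported on first attribute access.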
| 56
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
a : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
a : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = AudioClassificationPipeline(model=_lowerCamelCase ,feature_extractor=_lowerCamelCase )
# test with a raw waveform
__lowercase = np.zeros((34000,) )
__lowercase = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase , __lowercase = examples
__lowercase = audio_classifier(_lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_lowerCamelCase ,[
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
] ,)
__lowercase = audio_classifier(_lowerCamelCase ,top_k=1 )
self.assertEqual(
_lowerCamelCase ,[
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
] ,)
self.run_torchaudio(_lowerCamelCase )
@require_torchaudio
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
import datasets
# test with an audio array loaded from a dataset
__lowercase = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' ,'''clean''' ,split='''validation''' )
__lowercase = dataset[0]['''audio''']['''array''']
__lowercase = audio_classifier(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase ,[
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
] ,)
@require_torch
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = '''anton-l/wav2vec2-random-tiny-classifier'''
__lowercase = pipeline('''audio-classification''' ,model=_lowerCamelCase )
__lowercase = np.ones((8000,) )
__lowercase = audio_classifier(_lowerCamelCase ,top_k=4 )
__lowercase = [
{'''score''': 0.0_8_4_2, '''label''': '''no'''},
{'''score''': 0.0_8_3_8, '''label''': '''up'''},
{'''score''': 0.0_8_3_7, '''label''': '''go'''},
{'''score''': 0.0_8_3_4, '''label''': '''right'''},
]
__lowercase = [
{'''score''': 0.0_8_4_5, '''label''': '''stop'''},
{'''score''': 0.0_8_4_4, '''label''': '''on'''},
{'''score''': 0.0_8_4_1, '''label''': '''right'''},
{'''score''': 0.0_8_3_4, '''label''': '''left'''},
]
self.assertIn(nested_simplify(_lowerCamelCase ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
__lowercase = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
__lowercase = audio_classifier(_lowerCamelCase ,top_k=4 )
self.assertIn(nested_simplify(_lowerCamelCase ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
import datasets
__lowercase = '''superb/wav2vec2-base-superb-ks'''
__lowercase = pipeline('''audio-classification''' ,model=_lowerCamelCase )
__lowercase = datasets.load_dataset('''anton-l/superb_dummy''' ,'''ks''' ,split='''test''' )
__lowercase = np.array(dataset[3]['''speech'''] ,dtype=np.floataa )
__lowercase = audio_classifier(_lowerCamelCase ,top_k=4 )
self.assertEqual(
nested_simplify(_lowerCamelCase ,decimals=3 ) ,[
{'''score''': 0.9_8_1, '''label''': '''go'''},
{'''score''': 0.0_0_7, '''label''': '''up'''},
{'''score''': 0.0_0_6, '''label''': '''_unknown_'''},
{'''score''': 0.0_0_1, '''label''': '''down'''},
] ,)
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
pass
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 1
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 56
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# load the decoder with additional kwargs and make sure they are applied
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
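# Illustrative note (our reading): the (2, 10, 16) default shape corresponds to
# (batch, time_steps, vocab_size); 16 matches the toy vocabulary built in setUp
# ("|", "<pad>", "<unk>", "<s>", "</s>" plus the letters a-k).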
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder-relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder files from the hub and the locally cached files are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_times, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_times, atol=0.01))
"""Train and evaluate a small Keras CNN for binary image classification."""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )
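    # Note: the single sigmoid output unit pairs with the binary cross-entropy loss above;
    # for more than two classes you would use `units=N` with a softmax activation and
    # `categorical_crossentropy` instead.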
    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` supersedes the deprecated `fit_generator` and accepts generators directly
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid output is a probability, so threshold it rather than comparing
    # against exact 0/1 values.
    if result[0][0] >= 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right, inserting the decoder start token in front."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace possible -100 values (masked label positions) with `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
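# A minimal example of the shift: with pad_token_id=0 and decoder_start_token_id=1,
# input_ids [[5, 6, 7]] becomes [[1, 5, 6]]; any -100 sentinel coming from label masking
# is replaced by the pad id instead of leaking into the decoder input.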
class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
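# Minimal usage sketch (assumes the `google/mt5-small` checkpoint and a Flax install):
#   model = FlaxMTaModel.from_pretrained("google/mt5-small")
# The subclasses above only swap in the mT5 config; all modeling code is inherited
# from the Flax T5 implementation.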
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""Convert a GPTSAN TensorFlow checkpoint to a PyTorch state dict."""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow stores kernels transposed
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow stores kernels transposed
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow stores kernels transposed
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-TensorFlow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow stores kernels transposed
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow stores kernels transposed
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-TensorFlow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow stores kernels transposed
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow stores kernels transposed
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow stores kernels transposed
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-TensorFlow stores kernels transposed
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow stores kernels transposed
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
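    # Example invocation (the script and path names here are placeholders):
    #   python convert_tf_checkpoint.py --tf_model_dir ./tf_ckpt --output ./gptsan
    # A ".pt" suffix is appended to --output automatically when it is missing.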
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by concatenating every turn followed by the EOS token."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
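# Minimal usage sketch (assumes network access to the Hugging Face Hub):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("Hello world")["input_ids"]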
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
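# Each optional backend below is probed with a cheap availability check: when a
# dependency (vision, torch, tf) is missing, the corresponding symbols are simply left
# out of the lazy import structure instead of failing at import time.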
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""Tarjan's algorithm to find strongly connected components of a directed graph."""
from collections import deque
def tarjan(g):
    """
    Return the strongly connected components of a directed graph given as an adjacency
    list. Components are emitted in reverse topological order of the condensation.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
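    # A second, tiny sanity check: two mutually reachable nodes form one component,
    # while a node with no edges is its own component.
    assert tarjan(create_graph(3, [(0, 1), (1, 0)])) == [[1, 0], [2]]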
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
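# Example invocation (the script and path names here are placeholders):
#   python convert_opt_checkpoint.py --fairseq_path ./restored.pt --pytorch_dump_folder_path ./opt-converted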
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
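# For reference: `map_nested` applies the function to every leaf while preserving the
# nesting, so `map_nested(add_one, {"a": [1, 2]}, num_proc=2)` returns {"a": [2, 3]}.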
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False  # keep matmuls deterministic for the reference slices below
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    SentencePiece-style Unigram tokenizer: NMT and NFKC normalization, whitespace folding,
    lowercasing, metaspace pre-tokenization, digit splitting and punctuation splitting.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # re-serialize the model so the Unigram json carries the unk token id
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
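# Minimal usage sketch (the corpus file name is a placeholder):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train("corpus.txt", vocab_size=8000)
#   tokenizer.save("unigram.json")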
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # (the `worflow_run_id` spelling matches the imported helper's signature)
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Return the contents of the downloaded artifacts, keyed by artifact name and file name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
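# Example usage (the artifact name and paths are placeholders):
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="./artifacts", token=os.environ.get("GITHUB_TOKEN")
#   )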
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
    specify them on the command line.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class __lowercase :
'''simple docstring'''
    def __init__(self ,input_size=192 ,mask_patch_size=32 ,model_patch_size=4 ,mask_ratio=0.6 ) -> None:
        '''simple docstring'''
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('''Input size must be divisible by mask patch size''' )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('''Mask patch size must be divisible by model patch size''' )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
    def __call__(self ) -> torch.Tensor:
        '''simple docstring'''
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count ,dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 )
        return torch.tensor(mask.flatten() )
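# Illustrative sketch, not part of the original script: with the defaults above
# (input_size=192, mask_patch_size=32 -> a 6x6 grid of 36 maskable patches, of
# which ceil(36 * 0.6) = 22 are masked), the generator emits a flat 0/1 tensor
# over the (192/4)**2 = 2304 model patches. `MaskGenerator` is an assumed alias
# for the class above, matching the call site in main() below.
def _demo_mask_generator():
    generator = MaskGenerator(input_size=192 ,mask_patch_size=32 ,model_patch_size=4 ,mask_ratio=0.6 )
    mask = generator()
    assert mask.shape == (48 * 48,) # 6x6 patch grid upsampled by scale=8 on each axis
    assert int(mask.sum() ) == 22 * 8 * 8 # each masked patch covers 8x8 model patches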
def _lowerCAmelCase ( examples ):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    mask = torch.stack([example['''mask'''] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , lowerCamelCase_ , lowerCamelCase_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCamelCase_ ) and data_args.train_val_split > 0.0:
__lowercase = ds['''train'''].train_test_split(data_args.train_val_split )
__lowercase = split['''train''']
__lowercase = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowerCamelCase_ )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase_ )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowerCamelCase_ , '''decoder_type''' ):
__lowercase = '''simmim'''
# adapt config
__lowercase = model_args.image_size if model_args.image_size is not None else config.image_size
__lowercase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__lowercase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__lowercase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCamelCase_ )
elif model_args.model_name_or_path:
__lowercase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCamelCase_ )
else:
__lowercase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__lowercase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__lowercase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelForMaskedImageModeling.from_config(lowerCamelCase_ )
if training_args.do_train:
__lowercase = ds['''train'''].column_names
else:
__lowercase = ds['''validation'''].column_names
if data_args.image_column_name is not None:
__lowercase = data_args.image_column_name
elif "image" in column_names:
__lowercase = '''image'''
elif "img" in column_names:
__lowercase = '''img'''
else:
__lowercase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
__lowercase = Compose(
[
            Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
__lowercase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(lowerCamelCase_ : Any ):
__lowercase = [transforms(lowerCamelCase_ ) for image in examples[image_column_name]]
__lowercase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
__lowercase = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowerCamelCase_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
__lowercase = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowerCamelCase_ )
# Initialize our trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase = trainer.evaluate()
trainer.log_metrics('''eval''' , lowerCamelCase_ )
trainer.save_metrics('''eval''' , lowerCamelCase_ )
# Write model card and (optionally) push to hub
__lowercase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
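# Illustrative usage sketch, not part of the original script. A typical CLI
# invocation of this masked-image-modeling example might look as follows; the
# dataset name and output path are placeholders:
#
#   python run_mim.py \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval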
if __name__ == "__main__":
main()
| 56
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( test_results ):
    expressions = test_results.split(''' ''' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
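# Illustrative sketch, not part of the original script: what the parser above
# extracts from a typical pytest summary line. `handle_test_results` is an
# assumed alias for the function above, matching the call in the __main__
# block at the bottom of this file.
def _demo_handle_test_results():
    failed, success, time_spent = handle_test_results('''== 2 failed, 30 passed in 65.23s ==''' )
    assert (failed, success, time_spent) == (2, 30, '''65.23s''')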
def _lowerCAmelCase ( failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('''\n''' ):
        if re.search(r'''_ \[doctest\]''' , line ):
            in_error = True
            file = line.split(''' ''' )[2]
        elif in_error and not line.split(''' ''' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
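# Illustrative sketch, not part of the original script: the extractor above
# maps each doctest module named in a "_ [doctest]" header to the first
# non-numbered line that follows it in pytest's failures-short report.
# `extract_first_line_failure` is an assumed alias for the function above.
def _demo_extract_first_line_failure():
    report = '''_____ [doctest] transformers.models.bert _____
123     some source context
ValueError: shapes do not match'''
    assert extract_first_line_failure(report ) == {
        '''transformers.models.bert''': '''ValueError: shapes do not match'''
    }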
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours )}h{int(minutes )}m{int(seconds )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
        sorted_dict = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"&page={i + 2}" ).json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return jobs
    except Exception as e:
        print('''Unknown error, could not fetch links.''' , e )
        return {}
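# Illustrative sketch, not part of the original script: the GitHub jobs API
# queried above returns at most 100 jobs per page, so after the first request
# the code issues math.ceil((total_count - 100) / 100) follow-up requests via
# the `&page=` parameter. `_demo_job_pagination` is purely for illustration.
def _demo_job_pagination(total_count=250 ):
    extra_pages = math.ceil((total_count - 1_0_0) / 1_0_0 )
    return 1 + max(extra_pages ,0 ) # 250 jobs -> 3 requests in total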
def _lowerCAmelCase ( artifact_path : str ):
    _artifact = {}
    if os.path.exists(artifact_path ):
        files = os.listdir(artifact_path )
        for file in files:
            try:
                with open(os.path.join(artifact_path , file ) , encoding='''utf-8''' ) as f:
                    _artifact[file.split('''.''' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path , file )}." ) from e
    return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
        def __init__(self ,name ) -> Dict:
            '''simple docstring'''
            self.name = name
            self.paths = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
        def _UpperCAmelCase (self ,path ) -> Dict:
            '''simple docstring'''
            self.paths.append({'''name''': self.name, '''path''': path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 56
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Any = ["torch", "transformers", "onnx"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[int] = ["torch", "transformers", "onnx"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = ["torch", "transformers", "onnx"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : Tuple = ["torch", "transformers", "onnx"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
class __lowercase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
a : List[str] = ["torch", "transformers", "onnx"]
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> str:
'''simple docstring'''
requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
| 56
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 56
| 1
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = ["vqvae"]
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCamelCase ,scheduler=_lowerCamelCase ,mel=_lowerCamelCase ,vqvae=_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_lowerCamelCase ) else 1000
@torch.no_grad()
def __call__(self ,_lowerCamelCase = 1 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = 0 ,_lowerCamelCase = 0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = 0 ,_lowerCamelCase = 0 ,_lowerCamelCase = None ,_lowerCamelCase = 0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=True ,) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowerCamelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_lowerCamelCase ,device=self.device ,)
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowerCamelCase ,_lowerCamelCase )
__lowercase = self.mel.audio_slice_to_image(_lowerCamelCase )
__lowercase = np.frombuffer(input_image.tobytes() ,dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 255) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_lowerCamelCase ,0 ) ).latent_dist.sample(
generator=_lowerCamelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_lowerCamelCase ,_lowerCamelCase ,self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_lowerCamelCase ,_lowerCamelCase ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_lowerCamelCase ):
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )['''sample''']
else:
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase )['''sample''']
if isinstance(self.scheduler ,_lowerCamelCase ):
__lowercase = self.scheduler.step(
model_output=_lowerCamelCase ,timestep=_lowerCamelCase ,sample=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,)['''prev_sample''']
else:
__lowercase = self.scheduler.step(
model_output=_lowerCamelCase ,timestep=_lowerCamelCase ,sample=_lowerCamelCase ,generator=_lowerCamelCase ,)['''prev_sample''']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_lowerCamelCase )['''sample''']
__lowercase = (images / 2 + 0.5).clamp(0 ,1 )
__lowercase = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
__lowercase = (images * 255).round().astype('''uint8''' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='''RGB''' ).convert('''L''' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowerCamelCase )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_lowerCamelCase ) )
@torch.no_grad()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 50 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler ,_lowerCamelCase )
self.scheduler.set_timesteps(_lowerCamelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() ,dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 255) * 2 - 1
__lowercase = torch.Tensor(_lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase )['''sample''']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def _UpperCAmelCase (xa ,xb ,alpha ) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(xa ) ,torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
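# Illustrative sketch, not part of the original script: spherical linear
# interpolation as implemented by the static method above. Unlike a straight
# linear mix, slerp keeps the interpolant on the arc between the endpoints,
# so the midpoint of two orthogonal unit vectors still has unit norm.
def _demo_slerp():
    xa = torch.tensor([1.0, 0.0] )
    xb = torch.tensor([0.0, 1.0] )
    theta = acos(torch.dot(xa ,xb ) / torch.norm(xa ) / torch.norm(xb ) ) # pi / 2 here
    mid = sin(0.5 * theta ) * xa / sin(theta ) + sin(0.5 * theta ) * xb / sin(theta )
    assert abs(float(torch.norm(mid ) ) - 1.0 ) < 1E-6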
| 56
|
'''simple docstring'''
import math
def _lowerCAmelCase ( number : int ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
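# Illustrative sketch, not part of the original script: trial division by odd
# numbers up to sqrt(n) suffices, because any composite n has a divisor no
# larger than sqrt(n), and even numbers are rejected beforehand. `is_prime`
# is an assumed alias for the function above, matching the call below.
def _demo_is_prime():
    assert [n for n in range(2 , 20 ) if is_prime(n )] == [2, 3, 5, 7, 11, 13, 17, 19]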
def _lowerCAmelCase ( value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1 , **kwargs )
    return value
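# Illustrative sketch, not part of the original script: with the default
# factor=1 the search above walks upward from `value` to the nearest prime,
# while desc=True walks downward. `next_prime` is an assumed alias for the
# function above, matching its recursive call.
def _demo_next_prime():
    assert next_prime(14 ) == 17 # 14, 15 and 16 are composite
    assert next_prime(14 ,desc=True ) == 13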
| 56
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( arr : Sequence[float] , low : int , high : int ):
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( arr : Sequence[float] , low : int , mid : int , high : int ):
    left_sum , max_left = float('''-inf''' ), -1
    right_sum , max_right = float('''-inf''' ), -1
    summ = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
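# Illustrative sketch, not part of the original script: the classic CLRS test
# case for the divide-and-conquer routine above. `max_subarray` is an assumed
# alias for the recursive function, matching the call in the benchmark helper
# below.
def _demo_max_subarray():
    arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    assert max_subarray(arr , 0 , len(arr ) - 1 ) == (7, 10, 43) # the slice [18, 20, -7, 12]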
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( input_a : int , input_b : int ):
    return int(input_a == input_b == 0 )
def _lowerCAmelCase ( ):
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(f"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(f"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(f"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 56
| 1
|
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_SCREAMING_SNAKE_CASE = '''Usage of script: script_name <size_of_canvas:int>'''
_SCREAMING_SNAKE_CASE = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def _lowerCAmelCase ( size : int ):
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def _lowerCAmelCase ( canvas : list[list[bool]] ):
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def _lowerCAmelCase ( canvas : list[list[bool]] ):
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def _lowerCAmelCase ( pt : bool , neighbours : list[list[bool]] ):
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
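# Illustrative sketch, not part of the original script: Conway's rules as
# applied above -- a live cell survives with two or three live neighbours,
# and a dead cell with exactly three live neighbours comes alive.
# `__judge_point` is the alias used by the run() helper above.
def _demo_judge_point():
    neighbourhood = [[True, True, False], [False, True, False], [False, False, False]]
    # the centre cell is alive and has exactly two live neighbours -> it survives
    assert __judge_point(True , neighbourhood ) is True
    # a dead centre cell with only two live neighbours stays dead
    assert __judge_point(False , [[True, True, False], [False, False, False], [False, False, False]] ) is False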
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_SCREAMING_SNAKE_CASE = int(sys.argv[1])
# main working structure of this module.
_SCREAMING_SNAKE_CASE = create_canvas(canvas_size)
seed(c)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = plt.subplots()
fig.show()
_SCREAMING_SNAKE_CASE = ListedColormap(['''w''', '''k'''])
try:
while True:
_SCREAMING_SNAKE_CASE = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
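        # each CTC logit frame corresponds to `inputs_to_logits_ratio` audio samples, so
        # dividing by the sampling rate gives the duration of one frame in seconds; the
        # dict comprehension below multiplies frame offsets by this factor to get timestamps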
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int = 1_0_0_0 ):
__lowercase = -1
__lowercase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__lowercase = (n * n - 2 * a * n) // (2 * n - 2 * a)
__lowercase = n - a - b
if c * c == (a * a + b * b):
__lowercase = a * b * c
if candidate >= product:
__lowercase = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
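# Sanity-check sketch for the solver above. Assumption: this is the classic
# "special Pythagorean triplet" problem (a + b + c = 1000), whose unique triple is
# (200, 375, 425) with product 31875000; a brute-force cross-check:
def _brute_force_triplet_product(n: int = 1_0_0_0) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best
# _brute_force_triplet_product() == 31875000 should agree with solution()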
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
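# Usage sketch for the processor above. Assumptions: `GLPNStyleImageProcessor` is a
# hypothetical stand-in for the obfuscated class name, and inputs follow the
# floor-to-multiple-of-size_divisor behaviour implemented in `resize`:
#
#     import numpy as np
#     processor = GLPNStyleImageProcessor(size_divisor=32)
#     image = np.random.randint(0, 256, (65, 97, 3), dtype=np.uint8)
#     batch = processor(image, return_tensors="np")
#     # 65 -> 64 and 97 -> 96 (floored to multiples of 32), values rescaled by 1/255
#     print(batch["pixel_values"].shape)  # (1, 3, 64, 96)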
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ):
if height >= 1:
move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
move_disk(lowerCamelCase_ , lowerCamelCase_ )
move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ):
print('''moving disk from''' , lowerCamelCase_ , '''to''' , lowerCamelCase_ )
def _lowerCAmelCase ( ):
__lowercase = int(input('''Height of hanoi: ''' ).strip() )
move_tower(lowerCamelCase_ , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main()
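# Worked example (sketch, assuming the standard (height, from_pole, to_pole, with_pole)
# argument order): move_tower(2, "A", "B", "C") prints three moves, A->C, A->B, C->B.
# In general a tower of height h requires 2**h - 1 moves.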
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def _UpperCAmelCase (self ) -> float:
'''simple docstring'''
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_lowerCamelCase ) + abs(_lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__(self ,_lowerCamelCase ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_lowerCamelCase )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_lowerCamelCase )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
__lowercase = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
return [self.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[Node]:
'''simple docstring'''
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase ,_lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_lowerCamelCase ,) )
return successors
def _UpperCAmelCase (self ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = AStar(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
def _UpperCAmelCase (self ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_lowerCamelCase ,_lowerCamelCase )
self.fwd_astar.closed_nodes.append(_lowerCamelCase )
self.bwd_astar.closed_nodes.append(_lowerCamelCase )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(_lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(_lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_lowerCamelCase )
else:
astar.open_nodes.append(_lowerCamelCase )
return [self.fwd_astar.start.pos]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> list[TPosition]:
'''simple docstring'''
__lowercase = self.fwd_astar.retrace_path(_lowerCamelCase )
__lowercase = self.bwd_astar.retrace_path(_lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
    _SCREAMING_SNAKE_CASE = time.time()
    _SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
    _SCREAMING_SNAKE_CASE = bd_a_star.search()  # run the search so the timing below is meaningful
    _SCREAMING_SNAKE_CASE = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
'''simple docstring'''
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = data
__lowercase = None
class __lowercase :
'''simple docstring'''
def __init__(self ) -> Dict:
'''simple docstring'''
__lowercase = None
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.head
while temp is not None:
print(temp.data ,end=''' ''' )
__lowercase = temp.next
print()
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = Node(_lowerCamelCase )
__lowercase = self.head
__lowercase = new_node
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Tuple:
        '''simple docstring'''
        if node_data_a == node_data_b:
            return
        else:
            __lowercase = self.head
            while node_a is not None and node_a.data != node_data_a:
                __lowercase = node_a.next
            __lowercase = self.head
            while node_b is not None and node_b.data != node_data_b:
                __lowercase = node_b.next
            if node_a is None or node_b is None:
                return
            __lowercase , __lowercase = node_b.data, node_a.data
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
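# Expected demo output (sketch), given that push() prepends and swap_nodes() exchanges
# the data fields of the two matching nodes:
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5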
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
__lowercase = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''projector.weight''']
__lowercase = downstream_dict['''projector.bias''']
__lowercase = downstream_dict['''model.post_net.linear.weight''']
__lowercase = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''model.linear.weight''']
__lowercase = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
__lowercase = downstream_dict['''connector.weight''']
__lowercase = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowercase = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__lowercase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__lowercase = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = checkpoint['''Downstream''']
__lowercase = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
__lowercase = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__lowercase = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__lowercase = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('''ForXVector''' ):
__lowercase = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
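# Example invocation (sketch; the script filename and all paths are placeholders):
# python convert_unispeech_sat_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model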
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_SCREAMING_SNAKE_CASE = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def _lowerCAmelCase ( ):
__lowercase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase = get_sagemaker_input()
else:
__lowercase = get_cluster_input()
return config
def _lowerCAmelCase ( lowerCamelCase_ : List[Any]=None ):
if subparsers is not None:
__lowercase = subparsers.add_parser('''config''' , description=lowerCamelCase_ )
else:
__lowercase = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCamelCase_ )
parser.add_argument(
'''--config_file''' , default=lowerCamelCase_ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase_ )
return parser
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = get_user_input()
if args.config_file is not None:
__lowercase = args.config_file
else:
if not os.path.isdir(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
__lowercase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(lowerCamelCase_ )
else:
config.to_yaml_file(lowerCamelCase_ )
print(f"accelerate configuration saved at {config_file}" )
def _lowerCAmelCase ( ):
__lowercase = config_command_parser()
__lowercase = parser.parse_args()
config_command(lowerCamelCase_ )
if __name__ == "__main__":
main()
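# Usage note (assumption: this module backs the `accelerate config` CLI entry point):
# running `accelerate config` asks the prompts defined above and, unless --config_file
# is passed, saves the answers to the default cache location described in the help text.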
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
_SCREAMING_SNAKE_CASE = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_SCREAMING_SNAKE_CASE = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser(
'''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
train_parser.add_argument(
'''--tfds_path''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
train_parser.add_argument(
'''--datasets_directory''' ,type=_lowerCamelCase ,required=_lowerCamelCase ,help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,*_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_logger('''datasets-cli/converting''' )
__lowercase = tfds_path
__lowercase = datasets_directory
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
__lowercase = []
__lowercase = []
__lowercase = {}
if os.path.isdir(self._tfds_path ):
__lowercase = os.listdir(_lowerCamelCase )
else:
__lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if not os.path.isfile(_lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowerCamelCase ,encoding='''utf-8''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = False
__lowercase = False
__lowercase = []
for line in lines:
__lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__lowercase = ''''''
continue
elif "from absl import logging" in out_line:
__lowercase = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__lowercase = out_line.replace('''getLogger''' ,'''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase = True
__lowercase = list(filter(lambda _lowerCamelCase : e in out_line ,_lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase ) + '''\n''' )
out_lines.append(_lowerCamelCase )
out_lines.append(_lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,_lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__lowercase = '''from . import ''' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase = True
out_lines.append(_lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase = f_name.replace('''.py''' ,'''''' )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
__lowercase = os.path.join(_lowerCamelCase ,_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase )
if needs_manual_update:
with_manual_update.append(_lowerCamelCase )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.writelines(_lowerCamelCase )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
__lowercase = os.path.basename(_lowerCamelCase )
__lowercase = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(_lowerCamelCase ,_lowerCamelCase )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Dict = MobileBertTokenizer
a : str = MobileBertTokenizerFast
a : Tuple = True
a : Any = True
a : Union[str, Any] = filter_non_english
a : Optional[int] = "google/mobilebert-uncased"
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
__lowercase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__lowercase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = '''UNwant\u00E9d,running'''
__lowercase = '''unwanted, running'''
return input_text, output_text
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowerCamelCase ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,[9, 6, 7, 12, 10, 11] )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = '''UNwant\u00E9d,running'''
__lowercase = tokenizer.tokenize(_lowerCamelCase )
__lowercase = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = rust_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(_lowerCamelCase )
__lowercase = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
# With lower casing
__lowercase = self.get_tokenizer(do_lower_case=_lowerCamelCase )
__lowercase = self.get_rust_tokenizer(do_lower_case=_lowerCamelCase )
__lowercase = '''UNwant\u00E9d,running'''
__lowercase = tokenizer.tokenize(_lowerCamelCase )
__lowercase = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = rust_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(_lowerCamelCase )
__lowercase = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase ,strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''h\u00E9llo'''] )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase ,strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase ,strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase ,strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = BasicTokenizer(do_lower_case=_lowerCamelCase ,never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__lowercase = {}
for i, token in enumerate(_lowerCamelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_lowerCamelCase ,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) ,['''[UNK]''', '''runn''', '''##ing'''] )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ,_lowerCamelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__lowercase = tokenizer_r.encode_plus(
_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,return_token_type_ids=_lowerCamelCase ,return_offsets_mapping=_lowerCamelCase ,add_special_tokens=_lowerCamelCase ,)
__lowercase = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase ,'''do_lower_case''' ) else False
__lowercase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''] )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = ['''的''', '''人''', '''有''']
__lowercase = ''''''.join(_lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = True
__lowercase = self.tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = tokenizer_p.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer_r.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = False
__lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = self.tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
__lowercase = tokenizer_r.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer_p.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
__lowercase = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__lowercase = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowercase :
'''simple docstring'''
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} )
a : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowerCAmelCase ( lowerCamelCase_ : DataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[str] = None , ):
def _dataset(lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , ref_path=lowerCamelCase_ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase_ , file_path=lowerCamelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase_ )
model.resize_token_embeddings(len(lowerCamelCase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the '''
            '''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase_ , tokenizer=lowerCamelCase_ , evaluate=lowerCamelCase_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase_ , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , data_collator=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(lowerCamelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(lowerCamelCase_ )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
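# Example invocation (sketch; the script filename and data paths are placeholders):
# python run_language_modeling.py \
#     --model_name_or_path bert-base-uncased --mlm \
#     --train_data_file ./train.txt --eval_data_file ./eval.txt \
#     --do_train --do_eval --output_dir ./lm_output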
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_SCREAMING_SNAKE_CASE = 8
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple=BITS ):
__lowercase = x.device
__lowercase = (x * 2_5_5).int().clamp(0 , 2_5_5 )
__lowercase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase_ )
__lowercase = rearrange(lowerCamelCase_ , '''d -> d 1 1''' )
__lowercase = rearrange(lowerCamelCase_ , '''b c h w -> b c 1 h w''' )
__lowercase = ((x & mask) != 0).float()
__lowercase = rearrange(lowerCamelCase_ , '''b c d h w -> b (c d) h w''' )
__lowercase = bits * 2 - 1
return bits
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : List[str]=BITS ):
__lowercase = x.device
__lowercase = (x > 0).int()
__lowercase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase_ , dtype=torch.intaa )
__lowercase = rearrange(lowerCamelCase_ , '''d -> d 1 1''' )
__lowercase = rearrange(lowerCamelCase_ , '''b (c d) h w -> b c d h w''' , d=8 )
__lowercase = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 2_5_5).clamp(0.0 , 1.0 )
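# Round-trip sanity check (sketch). Assumption: `decimal_to_bits`/`bits_to_decimal` are
# the readable names of the two converters above, as used by the pipeline's __call__:
#
#     x = torch.rand(1, 3, 8, 8)
#     y = bits_to_decimal(decimal_to_bits(x))
#     assert torch.allclose(y, (x * 255).int() / 255)  # exact up to 8-bit quantization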
def _lowerCAmelCase ( self : Tuple , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : bool = True , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowercase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowercase = self.alphas_cumprod[timestep]
__lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowercase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowercase = self.bit_scale
if self.config.clip_sample:
        __lowercase = torch.clamp(pred_original_sample , -scale , scale )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowercase = self._get_variance(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowercase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowercase = model_output.device if torch.is_tensor(lowerCamelCase_ ) else '''cpu'''
__lowercase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowerCamelCase_ ).to(lowerCamelCase_ )
__lowercase = self._get_variance(lowerCamelCase_ , lowerCamelCase_ ) ** 0.5 * eta * noise
__lowercase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
def _lowerCAmelCase ( self : str , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[int]="epsilon" , lowerCamelCase_ : Dict=None , lowerCamelCase_ : bool = True , ):
__lowercase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowercase , __lowercase = torch.split(lowerCamelCase_ , sample.shape[1] , dim=1 )
else:
__lowercase = None
# 1. compute alphas, betas
__lowercase = self.alphas_cumprod[t]
__lowercase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowercase = 1 - alpha_prod_t
__lowercase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowercase = model_output
else:
raise ValueError(f"Unsupported prediction_type {prediction_type}." )
# 3. Clip "predicted x_0"
__lowercase = self.bit_scale
if self.config.clip_sample:
        __lowercase = torch.clamp(pred_original_sample , -scale , scale )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowercase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowercase = 0
if t > 0:
__lowercase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCamelCase_ ).to(model_output.device )
__lowercase = (self._get_variance(lowerCamelCase_ , predicted_variance=lowerCamelCase_ ) ** 0.5) * noise
__lowercase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = 1.0 ,) -> int:
'''simple docstring'''
super().__init__()
__lowercase = bit_scale
__lowercase = (
ddim_bit_scheduler_step if isinstance(_lowerCamelCase ,_lowerCamelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=_lowerCamelCase ,scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__(self ,_lowerCamelCase = 256 ,_lowerCamelCase = 256 ,_lowerCamelCase = 50 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
__lowercase = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=_lowerCamelCase ,)
__lowercase = decimal_to_bits(_lowerCamelCase ) * self.bit_scale
__lowercase = latents.to(self.device )
self.scheduler.set_timesteps(_lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__lowercase = self.unet(_lowerCamelCase ,_lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ).prev_sample
__lowercase = bits_to_decimal(_lowerCamelCase )
if output_type == "pil":
__lowercase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
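# Hedged sketch (not part of the pipeline above): one plausible shape for the
# `decimal_to_bits` / `bits_to_decimal` helpers the pipeline calls. Assumes 8
# bits per channel, float inputs scaled to [0, 1], and a {-1, 1} bit encoding;
# the real helpers may lay the bit dimension out differently.
def _sketch_decimal_to_bits(x, bits=8):
    import torch
    x = (x * 255).int().clamp(0, 255)  # [0, 1] floats -> 8-bit integers
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)  # 128 ... 1
    out = ((x.unsqueeze(-1) & mask) != 0).float()  # one {0, 1} channel per bit
    return out * 2 - 1  # {0, 1} -> {-1, 1}
def _sketch_bits_to_decimal(x, bits=8):
    import torch
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
    return ((x > 0).int() * mask).sum(-1).float() / 255  # threshold, then re-weight bits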
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _SCREAMING_SNAKE_CASE['''modeling_van'''] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _SCREAMING_SNAKE_CASE)
| 56
| 1
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = int(lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple=3_0_0 ):
# docstyle-ignore
return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__lowercase = f"{elt:.6f}" if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
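# Hedged usage sketch for the table helper above (its callers below refer to it
# as `text_to_html_table`): the first row of `items` is the header, later rows
# are data, and float cells are rendered with six decimals.
_example_items = [["Step", "Training Loss"], [10, 0.123456789]]
# text_to_html_table(_example_items) -> "<table ...> ... <td>0.123457</td> ..."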
class __lowercase :
'''simple docstring'''
a : Optional[Any] = 5
a : str = 0.2
def __init__(self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 300 ,) -> List[str]:
'''simple docstring'''
__lowercase = total
__lowercase = '''''' if prefix is None else prefix
__lowercase = leave
__lowercase = parent
__lowercase = width
__lowercase = None
__lowercase = None
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = False ,_lowerCamelCase = None ) -> int:
'''simple docstring'''
__lowercase = value
if comment is not None:
__lowercase = comment
if self.last_value is None:
__lowercase = __lowercase = time.time()
__lowercase = __lowercase = value
__lowercase = __lowercase = None
__lowercase = self.warmup
__lowercase = 1
self.update_bar(_lowerCamelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowercase = time.time()
__lowercase = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__lowercase = self.elapsed_time / (value - self.start_value)
else:
__lowercase = None
if value >= self.total:
__lowercase = self.total
__lowercase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowercase = self.average_time_per_item * (self.total - value)
self.update_bar(_lowerCamelCase )
__lowercase = value
__lowercase = current_time
if self.average_time_per_item is None:
__lowercase = 1
else:
__lowercase = max(int(self.update_every / self.average_time_per_item ) ,1 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> Dict:
'''simple docstring'''
__lowercase = ''' ''' * (len(str(self.total ) ) - len(str(_lowerCamelCase ) )) + str(_lowerCamelCase )
if self.elapsed_time is None:
__lowercase = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
__lowercase = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"
else:
__lowercase = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"
f" {format_time(self.predicted_remaining )}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f", {self.comment}]"
self.display()
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowercase = disp.display(disp.HTML(self.html_code ) ,display_id=_lowerCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=None ) -> Any:
'''simple docstring'''
super().__init__(_lowerCamelCase )
__lowercase = None if column_names is None else [column_names]
__lowercase = None
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowercase = disp.display(disp.HTML(self.html_code ) ,display_id=_lowerCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if self.inner_table is None:
__lowercase = [list(values.keys() ), list(values.values() )]
else:
__lowercase = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_lowerCamelCase )
__lowercase = columns
self.inner_table.append([values[c] for c in columns] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ,_lowerCamelCase=300 ) -> int:
'''simple docstring'''
__lowercase = NotebookProgressBar(_lowerCamelCase ,prefix=_lowerCamelCase ,parent=self ,width=_lowerCamelCase )
return self.child_bar
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = None
self.display()
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ) -> List[Any]:
'''simple docstring'''
__lowercase = None
__lowercase = None
__lowercase = False
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__lowercase = 0
__lowercase = 0
__lowercase = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__lowercase = NotebookTrainingTracker(state.max_steps ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1 ,comment=f"Epoch {epoch}/{state.num_train_epochs}" ,force_update=self._force_next_update ,)
__lowercase = False
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if not has_length(_lowerCamelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowercase = self.training_tracker.add_child(len(_lowerCamelCase ) )
else:
__lowercase = NotebookProgressBar(len(_lowerCamelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowercase = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__lowercase = state.global_step
self.training_tracker.write_line(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Any:
'''simple docstring'''
if self.training_tracker is not None:
__lowercase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__lowercase = log['''loss''']
break
if self.first_column == "Epoch":
__lowercase = int(state.epoch )
else:
__lowercase = state.global_step
__lowercase = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__lowercase = re.sub(R'''\_loss$''' ,'''''' ,_lowerCamelCase )
__lowercase = metrics.pop('''total_flos''' ,_lowerCamelCase )
__lowercase = metrics.pop('''epoch''' ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_runtime" ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_samples_per_second" ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_steps_per_second" ,_lowerCamelCase )
__lowercase = metrics.pop(f"{metric_key_prefix}_jit_compilation_time" ,_lowerCamelCase )
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
__lowercase = v
else:
__lowercase = k.split('''_''' )
__lowercase = ''' '''.join([part.capitalize() for part in splits[1:]] )
__lowercase = v
self.training_tracker.write_line(_lowerCamelCase )
self.training_tracker.remove_child()
__lowercase = None
# Evaluation takes a long time so we should force the next update.
__lowercase = True
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step ,comment=f"Epoch {int(state.epoch )}/{state.num_train_epochs}" ,force_update=_lowerCamelCase )
__lowercase = None
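# Hedged usage sketch: the callback defined above mirrors transformers'
# `NotebookProgressCallback`, which a `Trainer` attaches automatically inside a
# notebook but which can also be passed explicitly (standard transformers API):
# from transformers import Trainer, TrainingArguments
# trainer = Trainer(
#     model=model,
#     args=TrainingArguments(output_dir="out", evaluation_strategy="epoch"),
#     train_dataset=train_ds,
#     eval_dataset=eval_ds,
#     callbacks=[NotebookProgressCallback()],
# )
# trainer.train()  # renders an HTML progress bar plus a live metrics table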
| 56
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,_lowerCamelCase ,)
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
| 56
| 1
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=sys.maxsize ) -> str:
'''simple docstring'''
__lowercase = '''bilinear'''
__lowercase = max_size
__lowercase = short_edge_length
def __call__(self ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = []
for img in imgs:
__lowercase , __lowercase = img.shape[:2]
# later: provide list and randomly choose index for resize
__lowercase = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
if size == 0:
return img
__lowercase = size * 1.0 / min(_lowerCamelCase ,_lowerCamelCase )
if h < w:
__lowercase , __lowercase = size, scale * w
else:
__lowercase , __lowercase = scale * h, size
if max(_lowerCamelCase ,_lowerCamelCase ) > self.max_size:
__lowercase = self.max_size * 1.0 / max(_lowerCamelCase ,_lowerCamelCase )
__lowercase = newh * scale
__lowercase = neww * scale
__lowercase = int(neww + 0.5 )
__lowercase = int(newh + 0.5 )
if img.dtype == np.uinta:
__lowercase = Image.fromarray(_lowerCamelCase )
__lowercase = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
__lowercase = np.asarray(_lowerCamelCase )
else:
                __lowercase = img.permute(2 ,0 ,1 ).unsqueeze(0 )  # hwc -> nchw
__lowercase = nn.functional.interpolate(
_lowerCamelCase ,(newh, neww) ,mode=self.interp_method ,align_corners=_lowerCamelCase ).squeeze(0 )
img_augs.append(_lowerCamelCase )
return img_augs
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
__lowercase = cfg.INPUT.FORMAT
__lowercase = cfg.SIZE_DIVISIBILITY
__lowercase = cfg.PAD_VALUE
__lowercase = cfg.INPUT.MAX_SIZE_TEST
__lowercase = cfg.MODEL.DEVICE
__lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = lambda _lowerCamelCase : (x - self.pixel_mean) / self.pixel_std
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = tuple(max(_lowerCamelCase ) for s in zip(*[img.shape for img in images] ) )
__lowercase = [im.shape[-2:] for im in images]
__lowercase = [
nn.functional.pad(
_lowerCamelCase ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
for size, im in zip(_lowerCamelCase ,_lowerCamelCase )
]
return torch.stack(_lowerCamelCase ), torch.tensor(_lowerCamelCase )
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=False ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [images]
if single_image:
assert len(_lowerCamelCase ) == 1
for i in range(len(_lowerCamelCase ) ):
if isinstance(images[i] ,torch.Tensor ):
images.insert(_lowerCamelCase ,images.pop(_lowerCamelCase ).to(self.device ).float() )
elif not isinstance(images[i] ,torch.Tensor ):
images.insert(
_lowerCamelCase ,torch.as_tensor(img_tensorize(images.pop(_lowerCamelCase ) ,input_format=self.input_format ) )
.to(self.device )
.float() ,)
# resize smallest edge
__lowercase = torch.tensor([im.shape[:2] for im in images] )
__lowercase = self.aug(_lowerCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__lowercase = [self.normalizer(_lowerCamelCase ) for x in images]
# now pad them to do the following operations
__lowercase , __lowercase = self.pad(_lowerCamelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__lowercase = torch.true_divide(_lowerCamelCase ,_lowerCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple[int, int] ):
assert torch.isfinite(lowerCamelCase_ ).all(), "Box tensor contains infinite or NaN!"
__lowercase , __lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 1].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 2].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 3].clamp_(min=0 , max=lowerCamelCase_ )
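# Hedged usage sketch for the two box utilities above (in the upstream research
# code they are known as `_scale_box` and `_clip_box`; boxes are (x0, y0, x1, y1) rows):
# boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
# boxes = _scale_box(boxes, scales_yx)  # undo the resize scaling per axis
# _clip_box(boxes, (480, 640))          # clamp x into [0, 640] and y into [0, 480], in place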
| 56
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(_lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowerCamelCase )
def _UpperCAmelCase (self ) -> None:
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
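# Hedged usage sketch (upstream the class is `Graph`, with methods `add_edge`
# and `boruvka`; names here are illustrative):
# g = Graph(3)
# g.add_edge(0, 1, 5)
# g.add_edge(1, 2, 10)
# g.add_edge(0, 2, 1)
# g.boruvka()  # selects edges (0-2, w=1) and (0-1, w=5); total MST weight is 6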
| 56
| 1
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase=None ) -> Any:
'''simple docstring'''
if not conversation_id:
__lowercase = uuid.uuida()
if past_user_inputs is None:
__lowercase = []
if generated_responses is None:
__lowercase = []
__lowercase = conversation_id
__lowercase = past_user_inputs
__lowercase = generated_responses
__lowercase = text
def __eq__(self ,_lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = False ) -> Optional[int]:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
f"with: \"{text}\"." )
__lowercase = text
else:
logger.warning(
f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase = text
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase = None
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
self.generated_responses.append(_lowerCamelCase )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__(self ) -> int:
'''simple docstring'''
__lowercase = f"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase = '''user''' if is_user else '''bot'''
output += f"{name} >> {text} \n"
return output
@add_end_docstrings(
lowerCAmelCase__ , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple:
'''simple docstring'''
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__lowercase = self.tokenizer.eos_token
def _UpperCAmelCase (self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = {}
__lowercase = {}
__lowercase = {}
if min_length_for_response is not None:
__lowercase = min_length_for_response
if minimum_tokens is not None:
__lowercase = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=0 ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = super().__call__(_lowerCamelCase ,num_workers=_lowerCamelCase ,**_lowerCamelCase )
if isinstance(_lowerCamelCase ,_lowerCamelCase ) and len(_lowerCamelCase ) == 1:
return outputs[0]
return outputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=32 ) -> Dict[str, Any]:
'''simple docstring'''
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
            raise ValueError('''ConversationalPipeline expects a Conversation as input''' )
if conversation.new_user_input is None:
raise ValueError(
f"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer ,'''_build_conversation_input_ids''' ):
__lowercase = self.tokenizer._build_conversation_input_ids(_lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase = self._legacy_parse_and_tokenize(_lowerCamelCase )
if self.framework == "pt":
__lowercase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=10 ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = generate_kwargs.get('''max_length''' ,self.model.config.max_length )
__lowercase = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase = max_length - minimum_tokens
__lowercase = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase = model_inputs['''attention_mask'''][:, -trim:]
__lowercase = model_inputs.pop('''conversation''' )
__lowercase = max_length
__lowercase = self.model.generate(**_lowerCamelCase ,**_lowerCamelCase )
if self.model.config.is_encoder_decoder:
__lowercase = 1
else:
__lowercase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=True ) -> Dict:
'''simple docstring'''
__lowercase = model_outputs['''output_ids''']
__lowercase = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase ,)
__lowercase = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(_lowerCamelCase )
return conversation
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = self.tokenizer.eos_token_id
__lowercase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) )
if len(_lowerCamelCase ) > self.tokenizer.model_max_length:
__lowercase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
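# Hedged usage sketch (standard transformers API; the model name is illustrative):
# from transformers import pipeline, Conversation
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = Conversation("Going to the movies tonight - any suggestions?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])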
| 56
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
    # Model.fit accepts generators in TF2; fit_generator is deprecated/removed
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] >= 0.5:  # sigmoid output is a probability, so threshold at 0.5
        _SCREAMING_SNAKE_CASE = '''Abnormality detected'''
    else:
        _SCREAMING_SNAKE_CASE = '''Normal'''
| 56
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = 32 ,_lowerCamelCase=PILImageResampling.BILINEAR ,_lowerCamelCase = True ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = size_divisor
__lowercase = resample
super().__init__(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(_lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__lowercase = height // size_divisor * size_divisor
__lowercase = width // size_divisor * size_divisor
__lowercase = resize(_lowerCamelCase ,(new_h, new_w) ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
return image
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase=None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = size_divisor if size_divisor is not None else self.size_divisor
__lowercase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for img in images]
if do_resize:
__lowercase = [self.resize(_lowerCamelCase ,size_divisor=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCamelCase ,scale=1 / 255 ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
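# Hedged sketch of the size_divisor rounding used in `resize` above: height and
# width are floored to the nearest multiple of `size_divisor` (GLPN-style), e.g.
# height, width, size_divisor = 487, 641, 32
# new_h = 487 // 32 * 32  # 480
# new_w = 641 // 32 * 32  # 640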
| 56
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
| 1
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> int:
'''simple docstring'''
super().__init__(*_lowerCamelCase ,**_lowerCamelCase )
self.check_model_type(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase , __lowercase = {}, {}
if padding is not None:
__lowercase = padding
if truncation is not None:
__lowercase = truncation
if top_k is not None:
__lowercase = top_k
return preprocess_params, {}, postprocess_params
def __call__(self ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> str:
'''simple docstring'''
if isinstance(_lowerCamelCase ,(Image.Image, str) ) and isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = {'''image''': image, '''question''': question}
else:
__lowercase = image
__lowercase = super().__call__(_lowerCamelCase ,**_lowerCamelCase )
return results
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=False ,_lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
__lowercase = load_image(inputs['''image'''] )
__lowercase = self.tokenizer(
inputs['''question'''] ,return_tensors=self.framework ,padding=_lowerCamelCase ,truncation=_lowerCamelCase )
__lowercase = self.image_processor(images=_lowerCamelCase ,return_tensors=self.framework )
model_inputs.update(_lowerCamelCase )
return model_inputs
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = self.model(**_lowerCamelCase )
return model_outputs
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=5 ) -> Tuple:
'''simple docstring'''
if top_k > self.model.config.num_labels:
__lowercase = self.model.config.num_labels
if self.framework == "pt":
__lowercase = model_outputs.logits.sigmoid()[0]
__lowercase , __lowercase = probs.topk(_lowerCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
__lowercase = scores.tolist()
__lowercase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCamelCase ,_lowerCamelCase )]
| 56
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE = {
'''gpt-neox-20b''': 2_0_4_8,
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = ["input_ids", "attention_mask"]
def __init__(self ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase=None ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase="<|endoftext|>" ,_lowerCamelCase=False ,**_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowerCamelCase ,_lowerCamelCase ,tokenizer_file=_lowerCamelCase ,unk_token=_lowerCamelCase ,bos_token=_lowerCamelCase ,eos_token=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ,**_lowerCamelCase ,)
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_lowerCamelCase ) != add_prefix_space:
__lowercase = getattr(_lowerCamelCase ,pre_tok_state.pop('''type''' ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**_lowerCamelCase )
__lowercase = add_prefix_space
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = self._tokenizer.model.save(_lowerCamelCase ,name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
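# Hedged usage sketch (standard transformers API; checkpoint name from the map above):
# from transformers import GPTNeoXTokenizerFast
# tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# tok("Hello world")["input_ids"]  # -> list of token ids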
| 56
| 1
|
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , ):
__lowercase = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
__lowercase , __lowercase = input_paths_and_base_extractors[compression_format]
if input_path is None:
__lowercase = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
assert base_extractor.is_extractable(lowerCamelCase_ )
__lowercase = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__lowercase = file_path.read_text(encoding='''utf-8''' )
else:
__lowercase = output_path.read_text(encoding='''utf-8''' )
__lowercase = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , ):
__lowercase = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
__lowercase = input_paths[compression_format]
if input_path is None:
__lowercase = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
__lowercase = Extractor.infer_extractor_format(lowerCamelCase_ )
assert extractor_format is not None
__lowercase = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__lowercase = file_path.read_text(encoding='''utf-8''' )
else:
__lowercase = output_path.read_text(encoding='''utf-8''' )
__lowercase = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any ):
import tarfile
__lowercase = tmp_path / '''data_dot_dot'''
directory.mkdir()
__lowercase = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(lowerCamelCase_ , '''w''' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
import tarfile
__lowercase = tmp_path / '''data_sym_link'''
directory.mkdir()
__lowercase = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=lowerCamelCase_ )
with tarfile.TarFile(lowerCamelCase_ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple ):
__lowercase = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
__lowercase = insecure_tar_files[insecure_tar_file]
__lowercase = tmp_path / '''extracted'''
TarExtractor.extract(lowerCamelCase_ , lowerCamelCase_ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    # We should have fewer false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
__lowercase = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
__lowercase = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(lowerCamelCase_ )
assert zipfile.is_zipfile(str(lowerCamelCase_ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCamelCase_ ) # but we're right
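# Hedged sketch of the magic-number idea behind the last test: a real ZIP stream
# starts with b"PK\x03\x04" (b"PK\x05\x06" for an empty archive), whereas
# `zipfile.is_zipfile` also accepts any file containing an end-of-central-directory
# record, which is why the PNG bytes above are a false positive for it.
# with open(not_a_zip_file, "rb") as f:
#     looks_like_zip = f.read(4) in (b"PK\x03\x04", b"PK\x05\x06")  # False here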
| 56
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_SCREAMING_SNAKE_CASE = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ):
return float((preds == labels).mean() )
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
__lowercase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
__lowercase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__lowercase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 56
| 1
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir('''fixtures/test_sentencepiece.model''')
_SCREAMING_SNAKE_CASE = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
_SCREAMING_SNAKE_CASE = '''>>zh<<'''
_SCREAMING_SNAKE_CASE = '''Helsinki-NLP/'''
if is_torch_available():
_SCREAMING_SNAKE_CASE = '''pt'''
elif is_tf_available():
_SCREAMING_SNAKE_CASE = '''tf'''
else:
_SCREAMING_SNAKE_CASE = '''jax'''
@require_sentencepiece
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : int = MarianTokenizer
a : Dict = False
a : int = True
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
super().setUp()
__lowercase = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = Path(self.tmpdirname )
save_json(_lowerCamelCase ,save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(_lowerCamelCase ,save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_lowerCamelCase ,save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(_lowerCamelCase ,save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
__lowercase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> MarianTokenizer:
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = '''</s>'''
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''</s>''' )
self.assertEqual(vocab_keys[1] ,'''<unk>''' )
self.assertEqual(vocab_keys[-1] ,'''<pad>''' )
self.assertEqual(len(_lowerCamelCase ) ,9 )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,9 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
__lowercase = en_de_tokenizer(['''I am a small frog'''] ,return_tensors=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(_lowerCamelCase ,batch.input_ids[0] )
__lowercase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = [x.name for x in Path(_lowerCamelCase ).glob('''*''' )]
self.assertIn('''source.spm''' ,_lowerCamelCase )
MarianTokenizer.from_pretrained(_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = tok(
['''I am a small frog''' * 1000, '''I am a small frog'''] ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,return_tensors=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual(batch.input_ids.shape ,(2, 512) )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = tok(['''I am a tiny frog''', '''I am a small frog'''] ,padding=_lowerCamelCase ,return_tensors=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) )
@slow
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase ,model_name='''Helsinki-NLP/opus-mt-en-de''' ,revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' ,decode_kwargs={'''use_source_tokenizer''': True} ,)
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
__lowercase = '''Tämä on testi'''
__lowercase = '''This is a test'''
__lowercase = [76, 7, 2047, 2]
__lowercase = [69, 12, 11, 940, 2]
__lowercase = tokenizer(_lowerCamelCase ).input_ids
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = tokenizer(text_target=_lowerCamelCase ).input_ids
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
| 56
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase_ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(lowerCamelCase_ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
            # We split the fused QKV weight into separate Q, K, V tensors
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused weight in K, V, Q order despite the "qkv" naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(lowerCamelCase_ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=None ):
__lowercase = load_checkpoint(lowerCamelCase_ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(lowerCamelCase_ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(lowerCamelCase_ ).half().eval()
model.load_state_dict(lowerCamelCase_ )
# Check results
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
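# A minimal sketch (not part of the conversion script) of the fused-QKV split
# performed in load_checkpoint above. The K, V, Q storage order follows the
# metaseq link in the comment; the hidden size here is a made-up assumption.
def _qkv_split_sketch():
    hidden = 8  # hypothetical hidden size
    fused = torch.randn(3 * hidden, hidden)  # stand-in for a ".qkv_proj." weight
    depth = fused.shape[0]
    assert depth % 3 == 0
    k, v, q = torch.split(fused, depth // 3, dim=0)  # stored as K, V, Q despite the name
    assert q.shape == (hidden, hidden)
    return q, k, v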
| 56
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 56
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_lowerCamelCase )
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = 12
__lowercase = 12
__lowercase = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
__lowercase = TransformeraDModel(**_lowerCamelCase )
return model
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = '''cpu'''
__lowercase = self.dummy_vqvae
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_transformer
__lowercase = VQDiffusionScheduler(self.num_embed )
__lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
__lowercase = VQDiffusionPipeline(
vqvae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,transformer=_lowerCamelCase ,scheduler=_lowerCamelCase ,learned_classifier_free_sampling_embeddings=_lowerCamelCase ,)
__lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = '''teddy bear playing in the pool'''
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe([prompt] ,generator=_lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' )
__lowercase = output.images
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipe(
[prompt] ,generator=_lowerCamelCase ,output_type='''np''' ,return_dict=_lowerCamelCase ,num_inference_steps=2 )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
__lowercase = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
__lowercase = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__lowercase = pipeline(
'''teddy bear playing in the pool''' ,num_images_per_prompt=1 ,generator=_lowerCamelCase ,output_type='''np''' ,)
__lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 56
| 1
|
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase=0.0_1 ,_lowerCamelCase=1000 ) -> List[Any]:
'''simple docstring'''
__lowercase = p_stop
__lowercase = max_length
def __iter__(self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = 0
__lowercase = False
while not stop and count < self.max_length:
yield count
count += 1
__lowercase = random.random() < self.p_stop
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=False ,_lowerCamelCase=True ) -> Tuple:
'''simple docstring'''
__lowercase = [
BatchSamplerShard(_lowerCamelCase ,2 ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
for i in range(2 )
]
__lowercase = [list(_lowerCamelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_lowerCamelCase ) for shard in batch_sampler_shards] ,[len(_lowerCamelCase ) for e in expected] )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
__lowercase = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowercase = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
__lowercase = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
__lowercase = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
__lowercase = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
__lowercase = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
__lowercase = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
__lowercase = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowercase = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowercase = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowercase = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
__lowercase = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
__lowercase = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_lowerCamelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,even_batches=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowercase = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowercase = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
__lowercase = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_lowerCamelCase ,_lowerCamelCase ,split_batches=_lowerCamelCase ,even_batches=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__lowercase = [BatchSamplerShard(_lowerCamelCase ,2 ,_lowerCamelCase ,even_batches=_lowerCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) ,3 )
self.assertEqual(len(batch_sampler_shards[1] ) ,2 )
self.assertListEqual(list(batch_sampler_shards[0] ) ,[[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) ,[[3, 4], [9, 10, 11]] )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=False ,_lowerCamelCase=2 ,_lowerCamelCase=False ) -> str:
'''simple docstring'''
random.seed(_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
__lowercase = [
IterableDatasetShard(
_lowerCamelCase ,batch_size=_lowerCamelCase ,drop_last=_lowerCamelCase ,num_processes=_lowerCamelCase ,process_index=_lowerCamelCase ,split_batches=_lowerCamelCase ,)
for i in range(_lowerCamelCase )
]
__lowercase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_lowerCamelCase )
iterable_dataset_lists.append(list(_lowerCamelCase ) )
__lowercase = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
__lowercase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_lowerCamelCase ) ,len(_lowerCamelCase ) )
self.assertTrue(len(_lowerCamelCase ) % shard_batch_size == 0 )
__lowercase = []
for idx in range(0 ,len(_lowerCamelCase ) ,_lowerCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_lowerCamelCase ) < len(_lowerCamelCase ):
reference += reference
self.assertListEqual(_lowerCamelCase ,reference[: len(_lowerCamelCase )] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = 42
__lowercase = RandomIterableDataset()
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
# Edge case with a very small dataset
__lowercase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
self.check_iterable_dataset_shards(_lowerCamelCase ,_lowerCamelCase ,batch_size=4 ,drop_last=_lowerCamelCase ,split_batches=_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = BatchSampler(range(16 ) ,batch_size=4 ,drop_last=_lowerCamelCase )
__lowercase = SkipBatchSampler(_lowerCamelCase ,2 )
self.assertListEqual(list(_lowerCamelCase ) ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = SkipDataLoader(list(range(16 ) ) ,batch_size=4 ,skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = DataLoader(list(range(16 ) ) ,batch_size=4 )
__lowercase = skip_first_batches(_lowerCamelCase ,num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = DataLoaderShard(list(range(16 ) ) ,batch_size=4 )
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
Accelerator()
__lowercase = DataLoaderDispatcher(range(16 ) ,batch_size=4 )
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
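# A minimal sketch (separate from the tests above) of the round-robin sharding
# that BatchSamplerShard performs for two processes, ignoring the
# even_batches / split_batches padding behaviour exercised above.
def _round_robin_shard_sketch():
    batches = list(BatchSampler(range(12), batch_size=3, drop_last=False))
    shards = [batches[i::2] for i in range(2)]  # process i takes every 2nd batch
    assert shards[0] == [[0, 1, 2], [6, 7, 8]]
    assert shards[1] == [[3, 4, 5], [9, 10, 11]]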
| 56
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase = "▁" ,_lowerCamelCase = True ,_lowerCamelCase = "<unk>" ,_lowerCamelCase = "</s>" ,_lowerCamelCase = "<pad>" ,) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['''token''']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) ,''' ''' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_lowerCamelCase ,add_prefix_space=_lowerCamelCase )
__lowercase = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" ,special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] ,)
__lowercase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> Union[str, Any]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [files]
self._tokenizer.train(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 8000 ,_lowerCamelCase = True ,) -> List[str]:
'''simple docstring'''
__lowercase = trainers.UnigramTrainer(
vocab_size=_lowerCamelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCamelCase ,)
self._tokenizer.train_from_iterator(_lowerCamelCase ,trainer=_lowerCamelCase )
self.add_unk_id()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['''unk''']['''id''']
__lowercase = Tokenizer.from_str(json.dumps(_lowerCamelCase ) )
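# A self-contained sketch of the same training flow using the tokenizers API
# directly; the corpus, vocab size and special tokens here are made up.
def _unigram_train_sketch():
    sketch_tokenizer = Tokenizer(Unigram())
    trainer = trainers.UnigramTrainer(
        vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
    )
    sketch_tokenizer.train_from_iterator(["Hello world", "Tokenizers build subwords"], trainer=trainer)
    return sketch_tokenizer.encode("Hello world").tokens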
| 56
| 1
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : str ):
    __lowercase = [int(i) for i in lowerCamelCase_.split('''.''' ) if i.isdigit()]
    return len(__lowercase ) == 4 and all(0 <= octet <= 2_5_4 for octet in __lowercase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input().strip()
_SCREAMING_SNAKE_CASE = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
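# A stricter alternative sketch using the standard library; unlike the manual
# check above, this accepts 255 as an octet value and rejects malformed forms.
def _is_ip_v4_strict(address: str) -> bool:
    from ipaddress import AddressValueError, IPv4Address
    try:
        IPv4Address(address)
        return True
    except AddressValueError:
        return False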
| 56
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCamelCase ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(_lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
        __lowercase = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
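# A quick illustration (with a made-up pytest summary line) of what
# handle_test_results parses: the integers preceding "failed"/"passed"
# tokens and the duration token.
def _handle_test_results_sketch():
    failed, success, time_spent = handle_test_results("== 2 failed, 130 passed in 124.32s ==")
    assert (failed, success, time_spent) == (2, 130, "124.32s")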
| 56
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = "roberta-prelayernorm"
def __init__(self ,_lowerCamelCase=50265 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=3072 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-1_2 ,_lowerCamelCase=1 ,_lowerCamelCase=0 ,_lowerCamelCase=2 ,_lowerCamelCase="absolute" ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _UpperCAmelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 56
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ):
__lowercase = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
__lowercase = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(lowerCamelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 56
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = "open-llama"
def __init__(self ,_lowerCamelCase=100000 ,_lowerCamelCase=4096 ,_lowerCamelCase=11008 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase="silu" ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-6 ,_lowerCamelCase=True ,_lowerCamelCase=0 ,_lowerCamelCase=1 ,_lowerCamelCase=2 ,_lowerCamelCase=False ,_lowerCamelCase=True ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> int:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = rms_norm_eps
__lowercase = use_cache
__lowercase = kwargs.pop(
'''use_memorry_efficient_attention''' ,_lowerCamelCase )
__lowercase = hidden_dropout_prob
__lowercase = attention_dropout_prob
__lowercase = use_stable_embedding
__lowercase = shared_input_output_embedding
__lowercase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,tie_word_embeddings=_lowerCamelCase ,**_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}" )
__lowercase = self.rope_scaling.get('''type''' ,_lowerCamelCase )
__lowercase = self.rope_scaling.get('''factor''' ,_lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase ,_lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 56
|
'''simple docstring'''
import math
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : Tuple ):
__lowercase = factor * value
__lowercase = value
while not is_prime(lowerCamelCase_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowerCamelCase_ )
return value
| 56
| 1
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
__lowercase = quote(lowerCamelCase_ )
return hfh.hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' , revision=lowerCamelCase_ )
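# A small illustration of the version gate above: only pre-0.11.0 releases of
# huggingface_hub need the file path to be url-encoded by hand.
def _needs_manual_quoting() -> bool:
    return version.parse(hfh.__version__).release < version.parse("0.11.0").release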
| 56
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowercase = (low + high) // 2
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_subarray(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
__lowercase , __lowercase , __lowercase = max_cross_sum(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _lowerCAmelCase ( lowerCamelCase_ : Sequence[float] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase , __lowercase = float('''-inf''' ), -1
__lowercase = 0
for i in range(lowerCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowercase = summ
__lowercase = i
__lowercase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowercase = summ
__lowercase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [randint(1 , lowerCamelCase_ ) for _ in range(lowerCamelCase_ )]
__lowercase = time.time()
max_subarray(lowerCamelCase_ , 0 , input_size - 1 )
__lowercase = time.time()
return end - start
def _lowerCAmelCase ( ):
__lowercase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
__lowercase = [time_max_subarray(lowerCamelCase_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(lowerCamelCase_ , lowerCamelCase_ ):
print(lowerCamelCase_ , '''\t\t''' , lowerCamelCase_ )
plt.plot(lowerCamelCase_ , lowerCamelCase_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
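# A worked example (separate from the doctests): on the classic CLRS array the
# maximum subarray spans indices 7..10 with sum 43.
def _max_subarray_sketch():
    arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    assert max_subarray(arr, 0, len(arr) - 1) == (7, 10, 43)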
| 56
| 1
|