code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
    - for 'axb':
        - 'matthews_correlation': Matthews correlation coefficient
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """Computes F1 over all answer pairs and exact match over each question's answer set."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 |
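A minimal usage sketch of the helper functions defined in the sample above, assuming NumPy and scikit-learn are installed; the toy predictions and labels are made up for demonstration:

```python
import numpy as np

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
print(simple_accuracy(preds, labels))  # 0.75
print(acc_and_f1(preds, labels))       # {'accuracy': 0.75, 'f1': 0.666...}

# evaluate_multirc groups answer-level predictions by (paragraph, question).
ids_preds = [
    {"idx": {"paragraph": 0, "question": 0, "answer": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0, "answer": 1}, "prediction": 0},
]
print(evaluate_multirc(ids_preds, [1, 0]))  # {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
```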
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize nested GenerationConfig values so the result stays JSON-friendly.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 4 | 1 |
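A minimal sketch of configuring these arguments, assuming transformers is installed; the output directory is a placeholder:

```python
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="./out",  # placeholder path
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)
print(args.to_dict()["generation_max_length"])  # 128
```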
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Builds a random 10-element array and a random target sum."""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) search over all 3-permutations of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sorted two-pointer search, O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Times both implementations on the shared random dataset."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 67 |
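A quick agreement check for the two implementations above, on a case with a single valid triplet:

```python
# Both searches should find the unique triplet summing to 13.
arr, target = [1, 5, 8, 2, 10], 13
assert triplet_sum1(arr[:], target) == triplet_sum2(arr[:], target) == (1, 2, 10)
```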
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
                 position_embedding_type="absolute", block_per_row=4, approx_mode="full",
                 initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 67 | 1 |
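A minimal sketch instantiating the config, assuming a transformers version that ships MRA; the overridden values are illustrative:

```python
from transformers import MraConfig

config = MraConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.model_type, config.hidden_size)  # mra 256
```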
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    slow_tokenizer_class = CustomTokenizer
    pass
| 30 | """simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 69 | 0 |
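A usage sketch for the thin TF MT5 wrappers above, assuming TensorFlow and transformers are installed; google/mt5-small is the standard small checkpoint:

```python
from transformers import AutoTokenizer, TFMT5Model

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = TFMT5Model.from_pretrained("google/mt5-small")

inputs = tokenizer("Studies have shown that owning a dog is good for you", return_tensors="tf")
outputs = model(input_ids=inputs.input_ids, decoder_input_ids=inputs.input_ids)
print(outputs.last_hidden_state.shape)
```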
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : str ) -> Any:
'''simple docstring'''
__snake_case : Optional[int] = tempfile.mkdtemp()
__snake_case : List[Any] = 8
# DPR tok
__snake_case : Optional[int] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__snake_case : List[Any] = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__snake_case : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__snake_case : Union[str, Any] = dict(zip(__A , range(len(__A ) ) ) )
__snake_case : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__snake_case : Optional[int] = {'unk_token': '<unk>'}
__snake_case : Optional[int] = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__snake_case : int = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
__snake_case : str = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def A_ ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A_ ( self : List[str] ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A_ ( self : Tuple ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case : Any = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_dummy_dataset()
__snake_case : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__snake_case : Union[str, Any] = dataset
__snake_case : Union[str, Any] = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def A_ ( self : Optional[int] , __a : bool ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.get_dummy_dataset()
__snake_case : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
__snake_case : int = os.path.join(self.tmpdirname , 'dataset' )
__snake_case : Any = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
__snake_case : List[str] = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__snake_case : Any = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
return retriever
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case : Dict = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case : int = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
__snake_case : str = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(__A , open(__A , 'wb' ) )
__snake_case : Optional[int] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
__snake_case : str = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = 1
__snake_case : int = self.get_dummy_canonical_hf_index_retriever()
__snake_case : List[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case , __snake_case , __snake_case : List[Any] = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__snake_case : int = self.get_dummy_dataset()
retriever.save_pretrained(__A )
__snake_case : Dict = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case : Tuple = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : str ) -> Any:
'''simple docstring'''
__snake_case : str = 1
__snake_case : int = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__snake_case : List[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case , __snake_case , __snake_case : Any = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : int ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__snake_case : Dict = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case : Tuple = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : str ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = 1
__snake_case : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__snake_case : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case , __snake_case , __snake_case : Tuple = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : List[Any] ) -> Any:
'''simple docstring'''
__snake_case : int = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__snake_case : Tuple = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : Any = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case : int = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case : List[str] = 1
__snake_case : Union[str, Any] = self.get_dummy_legacy_index_retriever()
__snake_case : Any = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case , __snake_case , __snake_case : List[Any] = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , __A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__snake_case : List[Any] = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__snake_case : List[str] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case : Tuple = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
import torch
__snake_case : Dict = 1
__snake_case : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
__snake_case : int = [[5, 7], [10, 11]]
__snake_case : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case : Dict = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
__snake_case , __snake_case , __snake_case : Any = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
__snake_case : Union[str, Any] = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , )
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : int ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_dpr_ctx_encoder_tokenizer()
__snake_case : Any = 1
__snake_case : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
__snake_case : List[str] = [[5, 7], [10, 11]]
__snake_case : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__snake_case : int = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
            len(__A ) , 6 )  # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
| 358 |
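The test above repeatedly builds a tiny FAISS-indexed `datasets.Dataset`; a standalone sketch of that setup, assuming `datasets`, `faiss-cpu`, and `numpy` are installed:

```python
import faiss
import numpy as np
from datasets import Dataset

dim = 8
dataset = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "embeddings": [np.ones(dim, dtype=np.float32), 2 * np.ones(dim, dtype=np.float32)],
    }
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

# Inner-product search: the all-twos row scores highest for an all-ones query.
scores, examples = dataset.get_nearest_examples("embeddings", np.ones(dim, dtype=np.float32), k=1)
print(examples["text"])  # ['bar']
```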
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = tempfile.mkdtemp()
# fmt: off
__snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__snake_case : List[str] = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def A_ ( self : Optional[int] , **__a : Dict ) -> int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : int , **__a : Dict ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
        __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Dict = self.get_image_processor()
__snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = self.prepare_image_inputs()
__snake_case : List[str] = image_processor(__a , return_tensors='np' )
__snake_case : List[str] = processor(images=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Optional[int] = 'lower newer'
__snake_case : Dict = processor(text=__a )
__snake_case : List[Any] = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : List[Any] = 'lower newer'
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(__a ):
processor()
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : int = processor.batch_decode(__a )
__snake_case : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Union[str, Any] = 'lower newer'
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 0 | 0 |
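In user code, the save/load round-trip those processor tests exercise looks like this; a minimal sketch, assuming transformers is installed (it downloads bert-base-uncased):

```python
import tempfile

from transformers import BertTokenizer, VisionTextDualEncoderProcessor, ViTImageProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
image_processor = ViTImageProcessor()
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

with tempfile.TemporaryDirectory() as tmp:
    processor.save_pretrained(tmp)
    reloaded = VisionTextDualEncoderProcessor.from_pretrained(tmp)

assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()
```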
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Returns the most likely shift, its chi-squared statistic, and the decoded text."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # Cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # Decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 39 |
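A round-trip sketch for the decoder above: encrypt by shifting letters forward, then let the chi-squared search recover the shift. The encryption helper here is hypothetical, and very short inputs can fool the statistic:

```python
def caesar_encrypt(text: str, shift: int) -> str:
    # Hypothetical helper: shift lowercase letters forward, keep everything else.
    return "".join(
        chr((ord(c) - 97 + shift) % 26 + 97) if c.isalpha() else c for c in text.lower()
    )

ciphertext = caesar_encrypt("chi squared is a statistical test", 4)
shift, chi2, decoded = decrypt_caesar_with_chi_squared(ciphertext)
print(shift, decoded)  # expected: 4 chi squared is a statistical test
```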
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = '<unk> UNwanted , running'
_UpperCAmelCase = '<unk> unwanted, running'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(UpperCAmelCase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [0, 4, 8, 7] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
_UpperCAmelCase = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
_UpperCAmelCase = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = len(UpperCAmelCase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(UpperCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 39 | 1 |
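A standalone sketch mirroring the fixture above, assuming a transformers version that still ships TransfoXL: write a small vocab file, then tokenize with lower-casing enabled:

```python
import os
import tempfile

from transformers import TransfoXLTokenizer

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("\n".join(["<unk>", "want", "unwanted", ",", "running"]) + "\n")
    tokenizer = TransfoXLTokenizer(vocab_file=vocab_file, lower_case=True)
    print(tokenizer.tokenize("<unk> UNwanted , running"))  # ['<unk>', 'unwanted', ',', 'running']
```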
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
__magic_name__: Optional[Any] = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Any = (3, 32, 128)
snake_case_ : Tuple = tempfile.mkdtemp()
# fmt: off
snake_case_ : Optional[int] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
snake_case_ : Optional[int] = dict(zip(__a , range(len(__a ) ) ) )
snake_case_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
snake_case_ : Union[str, Any] = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 32, 'width': 128},
}
snake_case_ : Dict = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def UpperCAmelCase_ ( self : str , **_A : Any ) -> str:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def UpperCAmelCase_ ( self : Optional[int] , **_A : Any ) -> Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self : int ) -> Dict:
"""simple docstring"""
        snake_case_ : List[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
snake_case_ : Dict = Image.fromarray(np.moveaxis(__a , 0 , -1 ) )
return image_input
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : List[str] = self.get_image_processor()
snake_case_ : Optional[int] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def UpperCAmelCase_ ( self : str ) -> List[str]:
"""simple docstring"""
snake_case_ : str = self.get_tokenizer()
snake_case_ : Optional[Any] = self.get_image_processor()
snake_case_ : Optional[int] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
snake_case_ : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
snake_case_ : str = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
snake_case_ : Any = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Optional[Any] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
snake_case_ : Optional[Any] = self.prepare_image_inputs()
snake_case_ : Any = image_processor(__a , return_tensors='np' )
snake_case_ : Optional[int] = processor(images=__a , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Dict = self.get_image_processor()
snake_case_ : List[Any] = self.get_tokenizer()
snake_case_ : int = MgpstrProcessor(tokenizer=__a , image_processor=__a )
snake_case_ : Tuple = 'test'
snake_case_ : Tuple = processor(text=__a )
snake_case_ : Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self : int ) -> Dict:
"""simple docstring"""
snake_case_ : Dict = self.get_image_processor()
snake_case_ : Optional[int] = self.get_tokenizer()
snake_case_ : Optional[Any] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
snake_case_ : Dict = 'test'
snake_case_ : Tuple = self.prepare_image_inputs()
snake_case_ : Optional[Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Tuple = MgpstrProcessor(tokenizer=__a , image_processor=__a )
snake_case_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : List[Any] = processor.char_decode(__a )
snake_case_ : Any = tokenizer.batch_decode(__a )
snake_case_ : List[Any] = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(__a , __a )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[str] = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Union[str, Any] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
snake_case_ : Optional[int] = None
snake_case_ : List[Any] = self.prepare_image_inputs()
snake_case_ : Optional[Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = self.get_image_processor()
snake_case_ : int = self.get_tokenizer()
snake_case_ : Optional[int] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
snake_case_ : str = torch.randn(1 , 27 , 38 )
snake_case_ : int = torch.randn(1 , 27 , 50257 )
snake_case_ : Optional[int] = torch.randn(1 , 27 , 30522 )
snake_case_ : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
| 350 |
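End to end, the processor under test drives scene-text recognition roughly like this; a hedged sketch, assuming torch and transformers are installed, with a blank image standing in for a real text crop (alibaba-damo/mgp-str-base is the reference checkpoint):

```python
import numpy as np
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

image = Image.fromarray(np.zeros((32, 128, 3), dtype=np.uint8))  # stand-in input
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)

# batch_decode fuses the char/BPE/wordpiece heads into text predictions.
print(processor.batch_decode(outputs.logits)["generated_text"])
```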
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_SCREAMING_SNAKE_CASE = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self : Tuple , _A : Any , _A : str , _A : int=None , _A : str=1 ) -> List[str]:
"""simple docstring"""
snake_case_ : Union[str, Any] = tokenizer
snake_case_ : Optional[int] = dataset
snake_case_ : List[str] = len(_A ) if n_tasks is None else n_tasks
snake_case_ : List[Any] = n_copies
def __iter__( self : Any ) -> List[str]:
"""simple docstring"""
snake_case_ : List[str] = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
snake_case_ : Optional[int] = self.tokenizer(_A , padding=_A , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self : List[Any] , _A : Optional[int] , _A : str , _A : Dict ) -> Any:
"""simple docstring"""
snake_case_ : List[str] = start_length
snake_case_ : int = eof_strings
snake_case_ : Dict = tokenizer
def __call__( self : Any , _A : Union[str, Any] , _A : Dict , **_A : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[int] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
snake_case_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_A )
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : List[str] = re.split('(%s)' % '|'.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=20 , **__a ):
snake_case_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
snake_case_ : Optional[Any] = batch['ids'].shape[-1]
snake_case_ : List[str] = accelerator.unwrap_model(__a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
snake_case_ : List[str] = batch['task_id'].repeat(__a )
snake_case_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
snake_case_ ,snake_case_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
snake_case_ : Optional[Any] = generated_tokens.cpu().numpy()
snake_case_ : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
snake_case_ : Tuple = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
snake_case_ : int = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def SCREAMING_SNAKE_CASE__ ( ):
# Setup configuration
snake_case_ : Optional[int] = HfArgumentParser(__a )
snake_case_ : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
snake_case_ : Optional[int] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
snake_case_ : int = 'false'
if args.num_workers is None:
snake_case_ : Any = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
snake_case_ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
snake_case_ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
snake_case_ : int = tokenizer.eos_token
snake_case_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
snake_case_ : List[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
snake_case_ : Dict = load_dataset('openai_humaneval' )
snake_case_ : Optional[Any] = load_metric('code_eval' )
snake_case_ : Union[str, Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
snake_case_ : Optional[int] = args.n_samples // args.batch_size
snake_case_ : Dict = TokenizedDataset(__a , human_eval['test'] , n_copies=__a , n_tasks=__a )
# do not confuse args.batch_size, which is actually the num_return_sequences
snake_case_ : Optional[Any] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
snake_case_ : Union[str, Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
snake_case_ ,snake_case_ : Union[str, Any] = accelerator.prepare(__a , __a )
snake_case_ : str = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
snake_case_ : Tuple = []
for task in tqdm(range(__a ) ):
snake_case_ : Union[str, Any] = human_eval['test'][task]['test']
snake_case_ : Union[str, Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
snake_case_ ,snake_case_ : int = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(__a , __a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 88 | 0 |
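The truncation helper in the HumanEval script above (its name is obfuscated in the sample) splits a completion on end-of-function markers and drops the trailing fragment; a standalone sketch:

```python
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string: str) -> str:
    # Split on the EOF markers, keeping the delimiters, then drop the last
    # delimiter/fragment pair so the completion ends at the function body.
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])

completion = "    return x + 1\n\nprint(add_one(3))"
print(repr(remove_last_block(completion)))  # '    return x + 1\n'
```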
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def _A ( *UpperCamelCase_ : List[Any]) -> List[str]:
'''simple docstring'''
if not isinstance(UpperCamelCase_, UpperCamelCase_):
__lowercase = list(UpperCamelCase_)
for i in range(len(UpperCamelCase_)):
__lowercase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def _A ( UpperCamelCase_ : Exception) -> bool:
'''simple docstring'''
__lowercase = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(UpperCamelCase_, UpperCamelCase_) and len(exception.args) == 1:
return any(err in exception.args[0] for err in _statements)
return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called. "
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
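# --- Hedged usage sketch (illustrative; `train`, `model`, and `optimizer`
# below are hypothetical names, not part of this module) ---
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size, model, optimizer):
#     ...  # an OOM-style RuntimeError here halves batch_size to 64, 32, ... and retries
#
# train(model, optimizer)  # batch_size is injected by the decorator; do not pass it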
| 17 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    """simple docstring"""

    def setUp(self) -> None:
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self) -> None:
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self) -> None:
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self) -> None:
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self) -> None:
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 215 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    """Move a tower of `height` disks from `from_pole` to `to_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move."""
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
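# Worked example (illustrative): a tower of height n takes 2**n - 1 moves, so
# move_tower(3, "A", "B", "C") prints seven moves, in order:
#   A->B, A->C, B->C, A->B, C->A, C->B, A->B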
| 62 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
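# --- Hedged note (illustrative) ---
# Replacing the module object with _LazyModule defers the heavy imports:
# `from transformers.models.jukebox import JukeboxConfig` only resolves the
# configuration submodule, so torch is not imported until a torch-gated name
# from _import_structure is actually accessed. Conceptually:
#
# class _LazyModule(types.ModuleType):
#     def __getattr__(self, name):
#         submodule = self._attr_to_module[name]           # e.g. "configuration_jukebox"
#         module = importlib.import_module("." + submodule, self.__name__)
#         return getattr(module, name)
#
# (sketch only; the real class also handles __dir__, pickling, and caching)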
| 62 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
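# --- Hedged illustration (appended; not part of the original metric file) ---
# The class above is a thin wrapper over NLTK, so the min(recall, precision)
# definition can be probed directly on toy token lists:
if __name__ == "__main__":
    toy_hyp = ["the", "cat", "sat"]
    toy_ref = ["the", "cat", "sat", "down"]
    # corpus_gleu expects one list of reference token lists per hypothesis.
    print(gleu_score.corpus_gleu(list_of_references=[[toy_ref]], hypotheses=[toy_hyp], min_len=1, max_len=4))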
| 59 | """simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
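# --- Hedged usage sketch (illustrative) ---
# The defaults above mirror EfficientNet-B7 sizing; note the derived layer count:
# from transformers import EfficientNetConfig
# config = EfficientNetConfig()
# config.num_hidden_layers == sum(config.num_block_repeats) * 4   # 4 sublayers per repeated block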
| 289 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = tempfile.mkdtemp()
__a = BlipImageProcessor()
__a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''')
__a = BlipProcessor(snake_case__ , snake_case__)
processor.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__).tokenizer
def _lowerCamelCase ( self : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__).image_processor
def _lowerCamelCase ( self : int):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__a = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1)) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
__a = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0)
__a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=snake_case__ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , snake_case__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , snake_case__)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__a = self.prepare_image_inputs()
__a = image_processor(snake_case__ , return_tensors='''np''')
__a = processor(images=snake_case__ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__a = 'lower newer'
__a = processor(text=snake_case__)
__a = tokenizer(snake_case__ , return_token_type_ids=snake_case__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__a = 'lower newer'
__a = self.prepare_image_inputs()
__a = processor(text=snake_case__ , images=snake_case__)
self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''input_ids''', '''attention_mask'''])
# test if it raises when no input is passed
with pytest.raises(snake_case__):
processor()
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(snake_case__)
__a = tokenizer.batch_decode(snake_case__)
self.assertListEqual(snake_case__ , snake_case__)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__a = 'lower newer'
__a = self.prepare_image_inputs()
__a = processor(text=snake_case__ , images=snake_case__)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''input_ids''', '''attention_mask'''])
| 353 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case :Tuple = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
requires_backends(self , '''vision''')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, "Image.Image", List[Dict[str, Any]]] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
if "text_queries" in kwargs:
__a = kwargs.pop('''text_queries''')
if isinstance(__SCREAMING_SNAKE_CASE , (str, Image.Image)):
__a = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__a = image
__a = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
return results
def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = {}
if "threshold" in kwargs:
__a = kwargs['''threshold''']
if "top_k" in kwargs:
__a = kwargs['''top_k''']
return {}, {}, postprocess_params
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = load_image(inputs['''image'''])
__a = inputs['''candidate_labels''']
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = candidate_labels.split(''',''')
__a = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(__SCREAMING_SNAKE_CASE):
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__a = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
yield {
"is_last": i == len(__SCREAMING_SNAKE_CASE) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = model_inputs.pop('''target_size''')
__a = model_inputs.pop('''candidate_label''')
__a = model_inputs.pop('''is_last''')
__a = self.model(**__SCREAMING_SNAKE_CASE)
__a = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        '''simple docstring'''
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "torch.Tensor"):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
__a , __a , __a , __a = box.int().tolist()
__a = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
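# --- Hedged usage sketch (illustrative; the OWL-ViT checkpoint is an assumption,
# it is not named anywhere in this file) ---
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )
# # -> list of {'score', 'label', 'box': {'xmin', 'ymin', 'xmax', 'ymax'}} dicts,
# #    sorted by score and filtered by the `threshold` postprocess parameter.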
| 131 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=6 , __UpperCAmelCase=1_7 , __UpperCAmelCase=2_3 , __UpperCAmelCase=1_1 , __UpperCAmelCase=True , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = parent
lowerCAmelCase__ :Optional[Any] = batch_size
lowerCAmelCase__ :int = seq_length
lowerCAmelCase__ :str = act_dim
lowerCAmelCase__ :Optional[Any] = state_dim
lowerCAmelCase__ :List[str] = hidden_size
lowerCAmelCase__ :str = max_length
lowerCAmelCase__ :Optional[int] = is_training
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowerCAmelCase__ :List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowerCAmelCase__ :Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCAmelCase__ :List[str] = floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCAmelCase__ :List[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
lowerCAmelCase__ :List[str] = random_attention_mask((self.batch_size, self.seq_length) )
lowerCAmelCase__ :Tuple = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def snake_case ( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = DecisionTransformerModel(config=__a )
model.to(__a )
model.eval()
lowerCAmelCase__ :int = model(__a , __a , __a , __a , __a , __a )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def snake_case ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = DecisionTransformerModelTester(self )
lowerCAmelCase__ :Optional[int] = ConfigTester(self , config_class=__a , hidden_size=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :List[Any] = DecisionTransformerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( self ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = model_class(__a )
lowerCAmelCase__ :List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :List[str] = [*signature.parameters.keys()]
lowerCAmelCase__ :Tuple = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(__a )] , __a )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 2 # number of steps of autoregressive prediction we will perform
lowerCAmelCase__ :Any = 1_0 # defined by the RL environment, may be normalized
lowerCAmelCase__ :List[str] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
lowerCAmelCase__ :List[str] = model.to(__a )
lowerCAmelCase__ :List[Any] = model.config
torch.manual_seed(0 )
lowerCAmelCase__ :str = torch.randn(1 , 1 , config.state_dim ).to(device=__a , dtype=torch.floataa ) # env.reset()
lowerCAmelCase__ :Any = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=__a )
lowerCAmelCase__ :str = torch.tensor(__a , device=__a , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowerCAmelCase__ :List[Any] = state
lowerCAmelCase__ :Optional[Any] = torch.zeros(1 , 0 , config.act_dim , device=__a , dtype=torch.floataa )
lowerCAmelCase__ :List[str] = torch.zeros(1 , 0 , device=__a , dtype=torch.floataa )
lowerCAmelCase__ :Union[str, Any] = torch.tensor(0 , device=__a , dtype=torch.long ).reshape(1 , 1 )
for step in range(__a ):
lowerCAmelCase__ :Tuple = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__a )] , dim=1 )
lowerCAmelCase__ :Union[str, Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=__a )] , dim=1 )
lowerCAmelCase__ :str = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
state_pred, action_pred, return_pred = model(
states=__a , actions=__a , rewards=__a , returns_to_go=__a , timesteps=__a , attention_mask=__a , return_dict=__a , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
state, reward, done, _ = (  # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__a , dtype=torch.floataa ),
1.0,
False,
{},
)
lowerCAmelCase__ :Tuple = action_pred[0, -1]
lowerCAmelCase__ :Dict = torch.cat([states, state] , dim=1 )
lowerCAmelCase__ :Optional[int] = returns_to_go[0, -1] - reward
lowerCAmelCase__ :List[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowerCAmelCase__ :Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=__a , dtype=torch.long ) * (step + 1)] , dim=1 )
| 293 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
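# --- Hedged note (illustrative) ---
# attribute_map aliases canonical config names onto the GPT-2 style fields, so
# both spellings read and write the same underlying value:
# from transformers import GPTBigCodeConfig
# config = GPTBigCodeConfig()
# assert config.hidden_size == config.n_embd == 768   # default from __init__ above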
| 63 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class __magic_name__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : int = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=lowerCamelCase__ )
assert mmeta["long_pair"] == "heb-eng"
| 51 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[str] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["MobileNetV2FeatureExtractor"]
__UpperCamelCase : List[str] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 1 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 60 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def a_ ( lowerCamelCase : Any ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def a_ ( ):
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
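# --- Hedged note (illustrative) ---
# read_in_k_v above splits each fused key/value projection: the first
# hidden_sizes[i] rows become the key weight and the rest the value weight.
# A toy version of the same slicing:
# import torch
# hidden = 64
# kv_weight = torch.randn(2 * hidden, hidden)
# k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]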
| 4 | 0 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef

DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
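# --- Hedged usage sketch (illustrative) ---
# glue_compute_metrics dispatches on the task name; e.g. CoLA returns the
# Matthews correlation between predictions and labels:
# import numpy as np
# preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
# glue_compute_metrics("cola", preds, labels)   # -> {'mcc': ...}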
| 313 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp) -> bool:
    '''simple docstring'''
    if (
        (cp >= 0x4e00 and cp <= 0x9fff)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4dbf)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2a6df)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2a700 and cp <= 0x2b73f)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2b740 and cp <= 0x2b81f)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2b820 and cp <= 0x2ceaf)  # CJK Unified Ideographs Extension E
        or (cp >= 0xf900 and cp <= 0xfaff)  # CJK Compatibility Ideographs
        or (cp >= 0x2f800 and cp <= 0x2fa1f)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
for char in word:
lowercase_ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = set()
for token in tokens:
lowercase_ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowercase_ = list(__lowerCAmelCase )
return word_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowercase_ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowercase_ = bert_tokens
start, end = 0, len(bert_word)
while start < end:
lowercase_ = True
if is_chinese(bert_word[start] ):
lowercase_ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowercase_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowercase_ = """##""" + bert_word[j]
lowercase_ = start + i
lowercase_ = False
break
if single_word:
start += 1
return bert_word
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
lowercase_ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
lowercase_ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowercase_ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = []
for id in input_ids:
lowercase_ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowercase_ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = []
# We only save positions of Chinese subwords that start with ##, meaning they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowercase_ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowercase_ = f.readlines()
lowercase_ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowercase_ = LTP(args.ltp ) # faster in GPU device
lowercase_ = BertTokenizer.from_pretrained(args.bert )
lowercase_ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowercase_ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
args = parser.parse_args()
main(args)
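# --- Hedged illustration ---
# prepare_ref records positions of "##" continuation subwords that are single
# Chinese characters. For a hypothetical tokenization
# ['[CLS]', '中', '##国', '[SEP]'], the ref ids for that line would be [2]:
# only the '##国' continuation position is kept for whole-word masking.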
| 313 | 1 |
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are left to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combinations using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
UpperCAmelCase__ : Optional[Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
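# Cross-check (illustrative): the driver above prints C(5, 3) = 10 combinations,
# the same count the standard library produces:
# from itertools import combinations
# assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10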
| 121 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """simple docstring"""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 134 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__lowercase = logging.get_logger(__name__)
@add_end_docstrings(_a )
class _A ( _a ):
"""simple docstring"""
def __init__( self : List[Any] , **__UpperCAmelCase : List[Any]):
super().__init__(**__UpperCAmelCase)
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
requires_backends(self , "vision")
self.check_model_type(__UpperCAmelCase)
def __call__( self : str , __UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , __UpperCAmelCase : Union[str, List[str]] = None , **__UpperCAmelCase : List[Any] , ):
if "text_queries" in kwargs:
a : List[Any] = kwargs.pop("text_queries")
if isinstance(__UpperCAmelCase , (str, Image.Image)):
a : Any = {"image": image, "candidate_labels": candidate_labels}
else:
a : Optional[int] = image
a : Optional[int] = super().__call__(__UpperCAmelCase , **__UpperCAmelCase)
return results
def __snake_case ( self : Optional[int] , **__UpperCAmelCase : List[Any]):
a : str = {}
if "threshold" in kwargs:
a : Dict = kwargs["threshold"]
if "top_k" in kwargs:
a : str = kwargs["top_k"]
return {}, {}, postprocess_params
def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[Any]):
a : Union[str, Any] = load_image(inputs["image"])
a : Any = inputs["candidate_labels"]
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Optional[Any] = candidate_labels.split(",")
a : Union[str, Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(__UpperCAmelCase):
a : int = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework)
a : int = self.image_processor(__UpperCAmelCase , return_tensors=self.framework)
yield {
"is_last": i == len(__UpperCAmelCase) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int]):
a : List[Any] = model_inputs.pop("target_size")
a : Optional[int] = model_inputs.pop("candidate_label")
a : List[Any] = model_inputs.pop("is_last")
a : List[Any] = self.model(**__UpperCAmelCase)
a : Union[str, Any] = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : "torch.Tensor"):
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
a , a , a , a : List[Any] = box.int().tolist()
a : str = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
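
# --- Editor's usage sketch (not part of the original module) ------------------
# A minimal example of driving this pipeline through `transformers.pipeline`.
# The checkpoint name and image URL are illustrative assumptions; any zero-shot
# object-detection checkpoint (e.g. OwlViT) should work.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
        threshold=0.1,
    )
    # Each entry has the shape produced by `postprocess` above, e.g.
    # {"score": 0.28, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}
    for prediction in predictions:
        print(prediction)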
| 226 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
a : List[str] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
a : int = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
a : Union[str, Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
a : Union[str, Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
a : str = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 )
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
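# --- Editor's note (not in the original script) -------------------------------
# Typical invocation, using only the flags defined by the argparse setup above
# (the script file name itself is an assumption):
#
#   python convert_upernet_convnext_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub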
| 226 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 196 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
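
# --- Editor's sketch (not in the original file): a runnable self-check that
# ties the two helpers together. BFS expands nodes in order of hop count, so
# the first path that reaches the goal is a shortest one.
def _bfs_demo_check() -> None:
    assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
    assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4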
| 196 | 1 |
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
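
# --- Editor's sketch (not in the original file): the columnar transposition is
# invertible, so decrypting with the same key recovers the message. A minimal
# round-trip check:
def _transposition_round_trip() -> None:
    message, key = "Common sense is not so common.", 8
    encrypted = encrypt_message(key, message)
    assert decrypt_message(key, encrypted) == message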
| 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 1 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
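
# --- Editor's sketch (not in the original test file) --------------------------
# A simplified model of the rule these tests pin down: every component that
# ships a PyTorch ".bin" weight must also ship some ".safetensors" file. This
# approximation (which ignores the exact variant-matching rules) reproduces
# every expectation asserted above, but it is not the real helper.
def _naive_safetensors_compatible(filenames, variant=None):
    components_with_bin = {f.split("/")[0] for f in filenames if f.endswith(".bin")}
    components_with_safetensors = {f.split("/")[0] for f in filenames if f.endswith(".safetensors")}
    return components_with_bin.issubset(components_with_safetensors)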
| 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 84 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    """Trainer subclass for sequence-to-sequence models with generation-based evaluation."""

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 357 |
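# --- Editor's sketch for the trainer above (not in the original file) ----------
# `label_smoothed_nll_loss` is imported dynamically in `__init__` but never
# shown. A standard formulation of the fairseq-style helper these legacy
# examples rely on; treat it as a reference sketch, not the exact `utils` code.
import torch


def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """Interpolate the NLL loss with a uniform prior over the vocabulary."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    pad_mask = target.eq(ignore_index)
    target = target.masked_fill(pad_mask, 0)  # guard the gather below
    nll_loss = -lprobs.gather(dim=-1, index=target).masked_fill(pad_mask, 0.0)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True).masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss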
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 215 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
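
# --- Editor's sketch (not in the original file) --------------------------------
# `_LazyModule` defers the imports listed in `_import_structure` until first
# attribute access. A stripped-down model of the mechanism (PEP 562 module
# __getattr__), shown commented out because the real `_LazyModule` already
# replaces this module in `sys.modules`:
#
#   import importlib
#
#   def __getattr__(name):
#       for module_name, exported in _import_structure.items():
#           if name in exported:
#               return getattr(importlib.import_module(f".{module_name}", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")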
| 5 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
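
# --- Editor's usage sketch (not in the original test file) ---------------------
# The pattern the tests above exercise, outside unittest: run `generate` on a
# background thread and consume text chunks as they stream in.
def _streaming_demo(model, tokenizer, prompt):
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    text = ""
    for chunk in streamer:
        text += chunk  # arrives incrementally while generation runs
    thread.join()
    return text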
| 345 | 0 |
import torch
from diffusers import StableDiffusionPipeline
_lowerCamelCase : List[str] = """path-to-your-trained-model"""
_lowerCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
_lowerCamelCase : str = """A photo of sks dog in a bucket"""
_lowerCamelCase : Any = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 231 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 231 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def _snake_case ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase_ :Dict = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase_ :Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase_ :int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase_ :Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase_ :Optional[Any] = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase_ :Any = data_args.train_file.split(""".""" )[-1]
lowerCAmelCase_ :str = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase_ :List[str] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
lowerCAmelCase_ :List[str] = load_dataset("""csv""" , data_files=lowercase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase_ :int = load_dataset("""json""" , data_files=lowercase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase_ :Dict = raw_datasets["""train"""].features["""label"""].names
lowerCAmelCase_ :Any = len(lowercase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase_ :Any = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase__ , )
lowerCAmelCase_ :Optional[int] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase_ :Tuple = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase_ :List[str] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase_ :Union[str, Any] = {"""Refused""": 0, """Entailed""": 1}
lowerCAmelCase_ :List[str] = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCAmelCase_ :List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase__ : List[Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase__ : Dict ):
lowerCAmelCase_ :List[str] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
lowerCAmelCase_ :Optional[int] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowerCAmelCase_ :List[Any] = examples["""statement"""]
lowerCAmelCase_ :str = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
lowerCAmelCase_ :List[str] = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
lowerCAmelCase_ :str = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
lowerCAmelCase_ :Union[str, Any] = raw_datasets.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
lowerCAmelCase_ :Optional[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
lowerCAmelCase_ :Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
lowerCAmelCase_ :List[Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
lowerCAmelCase_ :Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
lowerCAmelCase_ :Optional[Any] = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
lowerCAmelCase_ :Any = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase__ ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase__ : EvalPrediction ):
lowerCAmelCase_ :Optional[Any] = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
lowerCAmelCase_ :Optional[Any] = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase_ :List[str] = default_data_collator
elif training_args.fpaa:
lowerCAmelCase_ :Union[str, Any] = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase_ :Any = None
# Initialize our Trainer
lowerCAmelCase_ :Optional[int] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowerCAmelCase_ :Optional[int] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase_ :List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase_ :str = last_checkpoint
lowerCAmelCase_ :Any = trainer.train(resume_from_checkpoint=lowercase__ )
lowerCAmelCase_ :List[Any] = train_result.metrics
lowerCAmelCase_ :List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
lowerCAmelCase_ :Union[str, Any] = min(lowercase__ , len(lowercase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase__ )
trainer.save_metrics("""train""" , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase_ :List[Any] = trainer.evaluate(eval_dataset=lowercase__ )
lowerCAmelCase_ :List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
lowerCAmelCase_ :Dict = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` column because it contains -1 and Trainer won't like that.
lowerCAmelCase_ :str = predict_dataset.remove_columns("""label""" )
lowerCAmelCase_ :List[str] = trainer.predict(lowercase__ , metric_key_prefix="""predict""" ).predictions
lowerCAmelCase_ :Optional[int] = np.argmax(lowercase__ , axis=1 )
lowerCAmelCase_ :Optional[int] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
lowerCAmelCase_ :Any = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def _snake_case ( lowercase__ : Tuple ) -> List[str]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 84 |
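The preprocessing above hinges on TabFact's flat table encoding: rows are separated by newlines and cells by "#", with the first row holding the header. Below is a minimal, self-contained sketch of that conversion using only pandas; the sample table string is made up for illustration.

import pandas as pd

def table_text_to_dataframe(table_text: str) -> pd.DataFrame:
    # Split the flat TabFact encoding: one line per row, "#" between cells.
    rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
    # The first row is the header, the rest are records.
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

if __name__ == "__main__":
    sample = "year#city\n2008#Beijing\n2012#London"  # hypothetical example
    print(table_text_to_dataframe(sample))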
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase = TaTokenizerFast
lowerCamelCase = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 199 | 0 |
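The _LazyModule indirection above defers heavy imports until an attribute is first accessed. A minimal sketch of the same idea using PEP 562 module-level __getattr__ (the module path and attribute name below are hypothetical; the real _LazyModule also handles __dir__ and submodule access):

import importlib

# Map public attribute names to the submodules that define them.
_import_structure = {"fast_path": "mypkg.heavy_module"}  # hypothetical module path

def __getattr__(name):
    # Called only when `name` is not found by normal lookup, so the heavy
    # submodule is imported on first access instead of at package import.
    if name in _import_structure:
        module = importlib.import_module(_import_structure[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")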
'''simple docstring'''
import argparse
import datetime
def _A ( A__ ):
"""simple docstring"""
__lowercase = {
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
__lowercase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(A__ ) < 11:
raise ValueError('''Must be 10 characters long''' )
# Get month
__lowercase = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('''Month must be between 1 - 12''' )
__lowercase = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get day
__lowercase = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('''Date must be between 1 - 31''' )
# Get second separator
__lowercase = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get year
__lowercase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''' )
# Get datetime obj for validation
__lowercase = datetime.date(int(A__ ) , int(A__ ) , int(A__ ) )
# Start math
if m <= 2:
__lowercase = y - 1
__lowercase = m + 12
# maths var
__lowercase = int(str(A__ )[:2] )
__lowercase = int(str(A__ )[2:] )
__lowercase = int(2.6 * m - 5.39 )
__lowercase = int(c / 4 )
__lowercase = int(k / 4 )
__lowercase = int(d + k )
__lowercase = int(t + u + v + x )
__lowercase = int(z - (2 * c) )
__lowercase = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
# Response
__lowercase = F"Your date {date_input}, is a {days[str(A__ )]}!"
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
lowerCAmelCase__ = parser.parse_args()
zeller(args.date_input)
| 359 |
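The arithmetic above is a variant of Zeller's congruence, which the script sanity-checks against datetime. Here is a compact sketch of the textbook Gregorian form, verified the same way; note this convention maps 0 to Saturday:

import datetime

def zeller_weekday(year: int, month: int, day: int) -> int:
    # Zeller's congruence treats January/February as months 13/14 of the previous year.
    if month < 3:
        month += 12
        year -= 1
    k, j = year % 100, year // 100
    # 0 = Saturday, 1 = Sunday, ..., 6 = Friday
    return (day + 13 * (month + 1) // 5 + k + k // 4 + j // 4 + 5 * j) % 7

if __name__ == "__main__":
    names = ["Saturday", "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday"]
    d = datetime.date(2000, 1, 1)
    assert names[zeller_weekday(d.year, d.month, d.day)] == d.strftime("%A")
    print(names[zeller_weekday(2000, 1, 1)])  # Saturday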
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
stooge(A__ , 0 , len(A__ ) - 1 )
return arr
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
__lowercase , __lowercase = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
__lowercase = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(A__ , A__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(A__ , i + t , (A__) )
# Recursively sort first 2/3 elements
stooge(A__ , A__ , (h - t) )
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
| 52 | 0 |
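Stooge sort is correct but spectacularly slow, roughly O(n^(log 3 / log 1.5)) ≈ O(n^2.71). A quick randomized check against Python's built-in sorted, with the algorithm re-declared inline so the sketch is self-contained:

import random

def stooge_sort(arr):
    def stooge(a, i, h):
        if i >= h:
            return
        if a[i] > a[h]:
            a[i], a[h] = a[h], a[i]
        if h - i + 1 > 2:
            t = (h - i + 1) // 3
            stooge(a, i, h - t)   # first two thirds
            stooge(a, i + t, h)   # last two thirds
            stooge(a, i, h - t)   # first two thirds again
    stooge(arr, 0, len(arr) - 1)
    return arr

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert stooge_sort(data[:]) == sorted(data)
print("stooge_sort agrees with sorted() on 100 random lists")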
from __future__ import annotations
from PIL import Image
# Define glider example
lowerCAmelCase__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowerCAmelCase__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _UpperCAmelCase (UpperCamelCase__ : list[list[int]] ):
_A : Tuple = []
for i in range(len(UpperCamelCase__ ) ):
_A : Tuple = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_A : Tuple = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(UpperCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(UpperCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(UpperCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_A : List[Any] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(UpperCamelCase__ )
return next_generation
def _UpperCAmelCase (UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int ):
_A : Union[str, Any] = []
for _ in range(UpperCamelCase__ ):
# Create output image
_A : Optional[int] = Image.new("RGB" , (len(cells[0] ), len(UpperCamelCase__ )) )
_A : Optional[Any] = img.load()
# Save cells to image
for x in range(len(UpperCamelCase__ ) ):
for y in range(len(cells[0] ) ):
_A : Dict = 255 - cells[y][x] * 255
_A : List[str] = (colour, colour, colour)
# Save image
images.append(UpperCamelCase__ )
_A : List[Any] = new_generation(UpperCamelCase__ )
return images
if __name__ == "__main__":
lowerCAmelCase__ = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 11 |
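The rules above are easy to sanity-check with the blinker, which oscillates with period two: the vertical bar becomes a horizontal one and back. A minimal pure-Python sketch of one generation step, with no imaging dependencies:

def step(cells):
    h, w = len(cells), len(cells[0])
    def live_neighbours(i, j):
        # Count live cells in the 8-neighbourhood, clipped at the grid edges.
        return sum(
            cells[a][b]
            for a in range(max(0, i - 1), min(h, i + 2))
            for b in range(max(0, j - 1), min(w, j + 2))
            if (a, b) != (i, j)
        )
    return [
        [
            1 if (cells[i][j] and 2 <= live_neighbours(i, j) <= 3)
            or (not cells[i][j] and live_neighbours(i, j) == 3)
            else 0
            for j in range(w)
        ]
        for i in range(h)
    ]

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert step(blinker) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert step(step(blinker)) == blinker  # period two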
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__ ( __snake_case : Dict , __snake_case : Optional[Any]=False ) -> Tuple:
lowercase : Union[str, Any] = OmegaConf.load(__snake_case )
if display:
print(yaml.dump(OmegaConf.to_container(__snake_case ) ) )
return config
def __magic_name__ ( __snake_case : Dict , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=None ) -> Tuple:
if conf_path is None:
lowercase : List[Any] = "./model_checkpoints/vqgan_only.yaml"
lowercase : Tuple = load_config(__snake_case , display=__snake_case )
lowercase : List[Any] = VQModel(**config.model.params )
if ckpt_path is None:
lowercase : List[str] = "./model_checkpoints/vqgan_only.pt"
lowercase : Optional[int] = torch.load(__snake_case , map_location=__snake_case )
if ".ckpt" in ckpt_path:
lowercase : str = sd["state_dict"]
model.load_state_dict(__snake_case , strict=__snake_case )
model.to(__snake_case )
del sd
return model
def __magic_name__ ( __snake_case : Tuple , __snake_case : Union[str, Any] ) -> int:
lowercase , lowercase , lowercase : List[Any] = model.encode(__snake_case )
print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
lowercase : str = model.decode(__snake_case )
return xrec
def __magic_name__ ( __snake_case : Dict , __snake_case : Optional[int]=False ) -> int:
lowercase , lowercase : Union[str, Any] = string.rsplit("." , 1 )
if reload:
lowercase : Any = importlib.import_module(__snake_case )
importlib.reload(__snake_case )
return getattr(importlib.import_module(__snake_case , package=__snake_case ) , cls )
def __magic_name__ ( __snake_case : str ) -> List[str]:
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def __magic_name__ ( __snake_case : Any , __snake_case : int , __snake_case : List[Any]=True , __snake_case : Dict=True ) -> str:
lowercase : Optional[int] = instantiate_from_config(__snake_case )
if sd is not None:
model.load_state_dict(__snake_case )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def __magic_name__ ( __snake_case : Optional[int] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : List[str] ) -> Any:
# load the specified checkpoint
if ckpt:
lowercase : Dict = torch.load(__snake_case , map_location="cpu" )
lowercase : List[Any] = pl_sd["global_step"]
print(f"""loaded model from global step {global_step}.""" )
else:
lowercase : int = {"state_dict": None}
lowercase : Optional[Any] = None
lowercase : List[Any] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=__snake_case , eval_mode=__snake_case )["model"]
return model, global_step
| 202 | 0 |
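The get_obj_from_str/instantiate_from_config pair above is the usual OmegaConf-style plugin pattern: a config carries a dotted "target" path plus "params", and the class is resolved and constructed at runtime. A minimal sketch using only the standard library, with collections.OrderedDict as a stand-in target:

import importlib

def get_obj_from_str(path: str):
    # "pkg.module.ClassName" -> the ClassName object itself
    module_name, cls_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), cls_name)

def instantiate_from_config(config: dict):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))

obj = instantiate_from_config(
    {"target": "collections.OrderedDict", "params": {"a": 1}}
)
print(type(obj).__name__, dict(obj))  # OrderedDict {'a': 1}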
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "distilbert"
lowercase_ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__(self : Any , UpperCAmelCase_ : str=30_522 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[Any]=4 * 768 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.2 , UpperCAmelCase_ : int=0 , **UpperCAmelCase_ : List[Any] , ) ->Any:
'''simple docstring'''
lowerCamelCase__: int =vocab_size
lowerCamelCase__: Any =max_position_embeddings
lowerCamelCase__: Optional[int] =sinusoidal_pos_embds
lowerCamelCase__: str =n_layers
lowerCamelCase__: str =n_heads
lowerCamelCase__: str =dim
lowerCamelCase__: Optional[Any] =hidden_dim
lowerCamelCase__: Dict =dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: int =activation
lowerCamelCase__: Dict =initializer_range
lowerCamelCase__: Optional[Any] =qa_dropout
lowerCamelCase__: int =seq_classif_dropout
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__: Dict ={0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__: Optional[int] ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 273 |
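The attribute_map above lets model-agnostic code ask for hidden_size while this config stores dim. A minimal sketch of how such aliasing can be wired with __getattr__ (a toy class for illustration, not the transformers implementation, which also aliases writes):

class AliasedConfig:
    # Generic name -> model-specific attribute
    attribute_map = {"hidden_size": "dim", "num_attention_heads": "n_heads"}

    def __init__(self, dim: int = 768, n_heads: int = 12):
        self.dim = dim
        self.n_heads = n_heads

    def __getattr__(self, name):
        # Only called when normal lookup fails, so real attributes win.
        aliases = type(self).attribute_map
        if name in aliases:
            return getattr(self, aliases[name])
        raise AttributeError(name)

cfg = AliasedConfig(dim=512, n_heads=8)
print(cfg.hidden_size, cfg.num_attention_heads)  # 512 8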
from __future__ import annotations
from typing import Any
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0) ->None:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Any =row, column
lowerCamelCase__: List[str] =[[default_value for c in range(UpperCAmelCase_)] for r in range(UpperCAmelCase_)]
def __str__(self : Tuple) ->str:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
lowerCamelCase__: List[str] =0
for row_vector in self.array:
for obj in row_vector:
lowerCamelCase__: int =max(UpperCAmelCase_ , len(str(UpperCAmelCase_)))
lowerCamelCase__: Any =F"""%{max_element_length}s"""
# Make string and return
def single_line(UpperCAmelCase_ : list[float]) -> str:
nonlocal string_format_identifier
lowerCamelCase__: Tuple ="["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase_) for row_vector in self.array)
return s
def __repr__(self : Optional[int]) ->str:
'''simple docstring'''
return str(self)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : tuple[int, int]) ->bool:
'''simple docstring'''
if not (isinstance(UpperCAmelCase_ , (list, tuple)) and len(UpperCAmelCase_) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__(self : int , UpperCAmelCase_ : tuple[int, int]) ->Any:
'''simple docstring'''
assert self.validate_indicies(UpperCAmelCase_)
return self.array[loc[0]][loc[1]]
def __setitem__(self : Optional[Any] , UpperCAmelCase_ : tuple[int, int] , UpperCAmelCase_ : float) ->None:
'''simple docstring'''
assert self.validate_indicies(UpperCAmelCase_)
lowerCamelCase__: str =value
def __add__(self : Dict , UpperCAmelCase_ : Matrix) ->Matrix:
'''simple docstring'''
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert self.row == another.row and self.column == another.column
# Add
lowerCamelCase__: Dict =Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
lowerCamelCase__: List[str] =self[r, c] + another[r, c]
return result
def __neg__(self : str) ->Matrix:
'''simple docstring'''
lowerCamelCase__: List[Any] =Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
lowerCamelCase__: Union[str, Any] =-self[r, c]
return result
def __sub__(self : str , UpperCAmelCase_ : Matrix) ->Matrix:
'''simple docstring'''
return self + (-another)
def __mul__(self : List[str] , UpperCAmelCase_ : int | float | Matrix) ->Matrix:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , (int, float)): # Scalar multiplication
lowerCamelCase__: List[Any] =Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
lowerCamelCase__: Union[str, Any] =self[r, c] * another
return result
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): # Matrix multiplication
assert self.column == another.row
lowerCamelCase__: Dict =Matrix(self.row , another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCamelCase__: int =F"""Unsupported type given for another ({type(UpperCAmelCase_)})"""
raise TypeError(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Matrix:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =Matrix(self.column , self.row)
for r in range(self.row):
for c in range(self.column):
lowerCamelCase__: Optional[int] =self[r, c]
return result
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix) ->Any:
'''simple docstring'''
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) and isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCamelCase__: Tuple =v.transpose()
lowerCamelCase__: Optional[Any] =(v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
lowerCamelCase__: List[str] =Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCamelCase__: Union[str, Any] =1
print(F"""a^(-1) is {ainv}""" )
# u, v
lowerCamelCase__: Optional[int] =Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: int =1, 2, -3
lowerCamelCase__: Optional[Any] =Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =4, -2, 5
print(F"""u is {u}""" )
print(F"""v is {v}""" )
print(F"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(__a , __a )}""" )
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 273 | 1 |
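The sherman_morrison method above implements the rank-one update formula (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), applied to the matrix it is called on, which is assumed to already be A^(-1). A quick numpy cross-check of that identity, using the identity matrix (so A^(-1) = A) and the same vectors as the demo above:

import numpy as np

A = np.eye(3)                      # A is the identity, so A^-1 is too
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

ainv = np.linalg.inv(A)
denom = 1.0 + (v.T @ ainv @ u).item()          # must be nonzero
sm = ainv - (ainv @ u @ v.T @ ainv) / denom    # Sherman-Morrison update

direct = np.linalg.inv(A + u @ v.T)            # brute-force inverse
assert np.allclose(sm, direct)
print("Sherman-Morrison matches the direct inverse")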
'''simple docstring'''
def a_ ( lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] ):
if height >= 1:
move_tower(height - 1 , lowerCamelCase , lowerCamelCase , lowerCamelCase )
move_disk(lowerCamelCase , lowerCamelCase )
move_tower(height - 1 , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ):
print('moving disk from' , lowerCamelCase , 'to' , lowerCamelCase )
def a_ ( ):
lowerCAmelCase = int(input('Height of hanoi: ' ).strip() )
move_tower(lowerCamelCase , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 4 |
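Moving a tower of height n takes exactly 2^n - 1 disk moves, which follows from the recurrence M(n) = 2*M(n-1) + 1: move n-1 disks aside, move the largest, then move the n-1 disks back on top. A small sketch that counts the moves instead of printing them:

def count_moves(height: int) -> int:
    # M(0) = 0, M(n) = 2 * M(n - 1) + 1
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1

for n in range(1, 8):
    assert count_moves(n) == 2**n - 1
print([count_moves(n) for n in range(1, 8)])  # [1, 3, 7, 15, 31, 63, 127]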
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231 | 0 |
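The repeated try/except blocks above implement graceful degradation: each backend is wired in only if its package is importable. A minimal sketch of the same availability check using importlib.util.find_spec; the class names registered below are placeholders, not real exports:

import importlib.util

def is_available(package: str) -> bool:
    # find_spec returns None when the package cannot be imported.
    return importlib.util.find_spec(package) is not None

_import_structure = {"tokenization": ["BasicTokenizer"]}  # always available
if is_available("torch"):
    _import_structure["modeling"] = ["TorchModel"]        # hypothetical names
if is_available("tensorflow"):
    _import_structure["modeling_tf"] = ["TFModel"]

print(sorted(_import_structure))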
'''simple docstring'''
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase : Any = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__UpperCAmelCase : list[list[int]] = []
for current_row_idx in range(lowerCAmelCase__ ):
__UpperCAmelCase : Optional[int] = populate_current_row(lowerCAmelCase__ , lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase_ ( lowerCAmelCase__ : list[list[int]] , lowerCAmelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__UpperCAmelCase : Tuple = 1, 1
for current_col_idx in range(1 , lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return current_row
def lowercase_ ( lowerCAmelCase__ : list[list[int]] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1]
__UpperCAmelCase : Dict = triangle[current_row_idx - 1][current_col_idx]
__UpperCAmelCase : List[str] = above_to_left_elt + above_to_right_elt
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__UpperCAmelCase : list[list[int]] = [[1]]
for row_index in range(1 , lowerCAmelCase__ ):
__UpperCAmelCase : Optional[int] = [0] + result[-1] + [0]
__UpperCAmelCase : str = row_index + 1
# Calculate the number of distinct elements in a row
__UpperCAmelCase : Dict = sum(divmod(lowerCAmelCase__ , 2 ) )
__UpperCAmelCase : Tuple = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__UpperCAmelCase : str = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__UpperCAmelCase : Any = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase_ ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ : Callable , lowerCAmelCase__ : int ) -> None:
__UpperCAmelCase : Optional[int] = f'{func.__name__}({value})'
__UpperCAmelCase : Optional[Any] = timeit(f'__main__.{call}' , setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'{call:38} -- {timing:.4f} seconds' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ , lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 370 |
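The optimized generator above still builds each row from its predecessor. A single row can also be produced directly from the multiplicative identity C(n, k) = C(n, k-1) * (n - k + 1) / k, which stays in integers because each intermediate product is divisible by k. A short sketch:

def pascal_row(n: int) -> list[int]:
    # Row n of Pascal's triangle, built left to right in O(n).
    row = [1]
    for k in range(1, n + 1):
        row.append(row[-1] * (n - k + 1) // k)
    return row

assert pascal_row(4) == [1, 4, 6, 4, 1]
print([pascal_row(n) for n in range(5)])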
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCamelCase = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''OwlViTFeatureExtractor''']
_UpperCamelCase = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 16 | 0 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Dict = OmegaConf.load(__lowerCamelCase )
A_ : Dict = torch.load(__lowerCamelCase , map_location='''cpu''' )["model"]
A_ : Union[str, Any] = list(state_dict.keys() )
# extract state_dict for VQVAE
A_ : List[str] = {}
A_ : str = "first_stage_model."
for key in keys:
if key.startswith(__lowerCamelCase ):
A_ : Optional[Any] = state_dict[key]
# extract state_dict for UNetLDM
A_ : int = {}
A_ : Any = "model.diffusion_model."
for key in keys:
if key.startswith(__lowerCamelCase ):
A_ : Optional[int] = state_dict[key]
A_ : Tuple = config.model.params.first_stage_config.params
A_ : Any = config.model.params.unet_config.params
A_ : Dict = VQModel(**__lowerCamelCase ).eval()
vqvae.load_state_dict(__lowerCamelCase )
A_ : List[str] = UNetLDMModel(**__lowerCamelCase ).eval()
unet.load_state_dict(__lowerCamelCase )
A_ : str = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCamelCase , )
A_ : Tuple = LDMPipeline(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
pipeline.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCamelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 186 |
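The conversion above splits one monolithic checkpoint into per-submodule state dicts by key prefix, stripping the shared prefix so the keys line up with the standalone submodule's parameter names. The core move in isolation, with toy keys so no torch is required:

def split_by_prefix(state_dict: dict, prefix: str) -> dict:
    # Keep only keys under `prefix` and drop the prefix itself,
    # so the keys match the standalone submodule's parameter names.
    return {
        key[len(prefix):]: value
        for key, value in state_dict.items()
        if key.startswith(prefix)
    }

checkpoint = {  # toy stand-in for torch.load(...)["model"]
    "first_stage_model.encoder.weight": 1,
    "model.diffusion_model.in.weight": 2,
}
print(split_by_prefix(checkpoint, "first_stage_model."))
# {'encoder.weight': 1}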
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : int = []
__snake_case , __snake_case : Tuple = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__snake_case : Tuple = result + left + right
return input_list
def lowerCAmelCase_ ( __lowerCamelCase ):
if len(__lowerCamelCase ) <= 1:
return input_list
__snake_case : Optional[Any] = list(__lowerCamelCase )
# iteration for two-way merging
__snake_case : Dict = 2
while p <= len(__lowerCamelCase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ):
__snake_case : Union[str, Any] = i
__snake_case : int = i + p - 1
__snake_case : Optional[int] = (low + high + 1) // 2
__snake_case : Any = merge(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# final merge of last two parts
if p * 2 >= len(__lowerCamelCase ):
__snake_case : Optional[Any] = i
__snake_case : Dict = merge(__lowerCamelCase , 0 , __lowerCamelCase , len(__lowerCamelCase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
_snake_case : int = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
_snake_case : str = []
else:
_snake_case : int = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 123 | 0 |
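Bottom-up merge sort's inner step is just the two-pointer merge of adjacent sorted runs, with the run width doubling each pass until one run covers the list. A compact sketch of the same scheme written over explicit run lists rather than index arithmetic:

def merge_runs(left: list, right: list) -> list:
    # Standard two-pointer merge of two already-sorted lists.
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i]); i += 1
        else:
            merged.append(right[j]); j += 1
    return merged + left[i:] + right[j:]

def bottom_up_merge_sort(items: list) -> list:
    runs = [[x] for x in items]  # start from width-1 runs
    while len(runs) > 1:
        # Merge adjacent pairs; an odd run out is carried over unchanged.
        runs = [merge_runs(runs[k], runs[k + 1]) if k + 1 < len(runs) else runs[k]
                for k in range(0, len(runs), 2)]
    return runs[0] if runs else []

assert bottom_up_merge_sort([5, 9, 2, 8, 5, 3]) == [2, 3, 5, 5, 8, 9]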
"""simple docstring"""
def a_ ( lowerCAmelCase_ : list ):
__lowerCAmelCase = len(lowerCAmelCase_ )
for i in range(1, lowerCAmelCase_ ):
__lowerCAmelCase = collection[i]
__lowerCAmelCase = 0
__lowerCAmelCase = i - 1
while low <= high:
__lowerCAmelCase = (low + high) // 2
if val < collection[mid]:
__lowerCAmelCase = mid - 1
else:
__lowerCAmelCase = mid + 1
for j in range(lowerCAmelCase_, lowerCAmelCase_, -1 ):
__lowerCAmelCase = collection[j - 1]
__lowerCAmelCase = val
return collection
if __name__ == "__main__":
_snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
_snake_case : Tuple = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 365 |
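Binary insertion sort cuts the comparisons to O(log i) per element, but each insertion still shifts O(i) items, so the overall cost remains O(n^2). The standard library exposes the same insertion primitive as bisect.insort, shown here building a sorted copy:

import bisect

def binary_insertion_sort(items: list) -> list:
    result: list = []
    for x in items:
        # Binary-search the insertion point, then shift and insert.
        bisect.insort(result, x)
    return result

assert binary_insertion_sort([5, 2, 9, 1, 5]) == [1, 2, 5, 5, 9]
print(binary_insertion_sort([5, 2, 9, 1, 5]))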
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case : Dict = 0
_snake_case : Dict = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case : Tuple = tuple[int, int]
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None , ) -> None:
__lowerCAmelCase = pos_x
__lowerCAmelCase = pos_y
__lowerCAmelCase = (pos_y, pos_x)
__lowerCAmelCase = goal_x
__lowerCAmelCase = goal_y
__lowerCAmelCase = g_cost
__lowerCAmelCase = parent
__lowerCAmelCase = self.calculate_heuristic()
__lowerCAmelCase = self.g_cost + self.h_cost
def lowercase ( self : Any ) -> float:
__lowerCAmelCase = self.pos_x - self.goal_x
__lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase_ ) + abs(lowerCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Union[str, Any] , lowerCAmelCase_ : Node ) -> bool:
return self.f_cost < other.f_cost
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> Tuple:
__lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ )
__lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , lowerCAmelCase_ )
__lowerCAmelCase = [self.start]
__lowerCAmelCase = []
__lowerCAmelCase = False
def lowercase ( self : str ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase_ )
self.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = self.get_successors(lowerCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase_ )
else:
self.open_nodes.append(lowerCAmelCase_ )
return [self.start.pos]
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Node ) -> list[Node]:
__lowerCAmelCase = []
for action in delta:
__lowerCAmelCase = parent.pos_x + action[1]
__lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) )
return successors
def lowercase ( self : Tuple , lowerCAmelCase_ : Node | None ) -> list[TPosition]:
__lowerCAmelCase = node
__lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase = current_node.parent
path.reverse()
return path
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> None:
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = False
def lowercase ( self : Dict ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
__lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase_ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = current_bwd_node
__lowerCAmelCase = current_fwd_node
__lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase_ )
else:
astar.open_nodes.append(lowerCAmelCase_ )
return [self.fwd_astar.start.pos]
def lowercase ( self : Dict , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> list[TPosition]:
__lowerCAmelCase = self.fwd_astar.retrace_path(lowerCAmelCase_ )
__lowerCAmelCase = self.bwd_astar.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
__lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case : List[Any] = (0, 0)
_snake_case : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case : int = time.time()
_snake_case : Optional[int] = AStar(init, goal)
_snake_case : int = a_star.search()
_snake_case : Union[str, Any] = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
_snake_case : Any = time.time()
_snake_case : Dict = BidirectionalAStar(init, goal)
_snake_case : Optional[int] = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 207 | 0 |
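The open list above is re-sorted on every iteration; the textbook refinement keeps it in a binary heap so that pop-min costs O(log n). A compact grid A* sketch with heapq, using the same Manhattan heuristic and (y, x) coordinates on a small made-up grid:

import heapq

def astar(grid, start, goal):
    # start/goal are (y, x); 0 = free cell, 1 = obstacle.
    def h(pos):
        return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])  # Manhattan

    open_heap = [(h(start), 0, start)]          # entries are (f, g, position)
    came_from, best_g = {}, {start: 0}
    while open_heap:
        _, g, pos = heapq.heappop(open_heap)
        if pos == goal:
            path = [pos]
            while pos in came_from:             # walk parents back to start
                pos = came_from[pos]
                path.append(pos)
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = pos[0] + dy, pos[1] + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0:
                if g + 1 < best_g.get((ny, nx), float("inf")):
                    best_g[(ny, nx)] = g + 1
                    came_from[(ny, nx)] = pos
                    heapq.heappush(open_heap, (g + 1 + h((ny, nx)), g + 1, (ny, nx)))
    return [start]

demo = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
print(astar(demo, (0, 0), (2, 0)))
# [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]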
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str="shi-labs/oneformer_demo" ):
"""simple docstring"""
with open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) as f:
_snake_case : Optional[int] = json.load(snake_case__ )
_snake_case : str = {}
_snake_case : Union[str, Any] = []
_snake_case : str = []
for key, info in class_info.items():
_snake_case : str = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(snake_case__ ) )
_snake_case : Optional[Any] = thing_ids
_snake_case : str = class_names
return metadata
class lowercase( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[str], a_: Dict, a_: Any=7, a_: int=3, a_: List[Any]=30, a_: Tuple=400, a_: int=None, a_: List[Any]=True, a_: Dict=True, a_: Any=[0.5, 0.5, 0.5], a_: str=[0.5, 0.5, 0.5], a_: Dict=10, a_: List[str]=False, a_: Optional[Any]=255, a_: Optional[Any]="shi-labs/oneformer_demo", a_: Optional[Any]="ade20k_panoptic.json", a_: int=10, ):
'''simple docstring'''
_snake_case : List[Any] = parent
_snake_case : Tuple = batch_size
_snake_case : str = num_channels
_snake_case : int = min_resolution
_snake_case : Tuple = max_resolution
_snake_case : int = do_resize
_snake_case : Optional[Any] = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
_snake_case : Union[str, Any] = do_normalize
_snake_case : Union[str, Any] = image_mean
_snake_case : Dict = image_std
_snake_case : List[str] = class_info_file
_snake_case : Dict = prepare_metadata(a_, a_ )
_snake_case : List[Any] = num_text
_snake_case : Tuple = repo_path
# for the post_process_functions
_snake_case : Tuple = 2
_snake_case : Any = 10
_snake_case : Optional[int] = 10
_snake_case : Any = 3
_snake_case : str = 4
_snake_case : List[str] = num_labels
_snake_case : Any = do_reduce_labels
_snake_case : Union[str, Any] = ignore_index
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self: int, a_: str, a_: Optional[int]=False ):
'''simple docstring'''
if not batched:
_snake_case : Tuple = image_inputs[0]
if isinstance(a_, Image.Image ):
_snake_case , _snake_case : List[str] = image.size
else:
_snake_case , _snake_case : Optional[int] = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
_snake_case : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
_snake_case : List[str] = self.size["""shortest_edge"""]
_snake_case : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
_snake_case : List[str] = self.size["""shortest_edge"""]
_snake_case : List[Any] = self.size["""shortest_edge"""]
else:
_snake_case : Optional[int] = []
for image in image_inputs:
_snake_case , _snake_case : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : Tuple = max(a_, key=lambda a_ : item[0] )[0]
_snake_case : str = max(a_, key=lambda a_ : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), )
@require_torch
@require_vision
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowercase__ = image_processing_class
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[str] = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_, """image_mean""" ) )
self.assertTrue(hasattr(a_, """image_std""" ) )
self.assertTrue(hasattr(a_, """do_normalize""" ) )
self.assertTrue(hasattr(a_, """do_resize""" ) )
self.assertTrue(hasattr(a_, """size""" ) )
self.assertTrue(hasattr(a_, """ignore_index""" ) )
self.assertTrue(hasattr(a_, """class_info_file""" ) )
self.assertTrue(hasattr(a_, """num_text""" ) )
self.assertTrue(hasattr(a_, """repo_path""" ) )
self.assertTrue(hasattr(a_, """metadata""" ) )
self.assertTrue(hasattr(a_, """do_reduce_labels""" ) )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : str = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_, Image.Image )
# Test not batched input
_snake_case : Tuple = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
_snake_case , _snake_case : Dict = self.image_processing_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case , _snake_case : Any = self.image_processing_tester.get_expected_values(a_, batched=a_ )
_snake_case : Tuple = image_processor(
a_, ["""semantic"""] * len(a_ ), return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : List[str] = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_, numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, np.ndarray )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
_snake_case , _snake_case : Any = self.image_processing_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case , _snake_case : Tuple = self.image_processing_tester.get_expected_values(a_, batched=a_ )
_snake_case : Optional[Any] = image_processor(
a_, ["""semantic"""] * len(a_ ), return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Any = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_, torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
# Test not batched input
_snake_case : Dict = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(a_, batched=a_ )
_snake_case : str = image_processor(
a_, ["""semantic"""] * len(a_ ), return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: Dict, a_: List[Any]=False, a_: int=False, a_: Union[str, Any]="np" ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : Dict = self.image_processing_tester.num_labels
_snake_case : int = None
_snake_case : Union[str, Any] = None
_snake_case : Any = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_ )
if with_segmentation_maps:
_snake_case : str = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(a_ ) ) * 2
_snake_case : List[Any] = dict(enumerate(a_ ) )
_snake_case : Any = [
np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : List[str] = [Image.fromarray(a_ ) for annotation in annotations]
_snake_case : Dict = image_processor(
a_, ["""semantic"""] * len(a_ ), a_, return_tensors="""pt""", instance_id_to_semantic_id=a_, pad_and_return_pixel_mask=a_, )
return inputs
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
def common(a_: List[Any]=False, a_: Any=None ):
_snake_case : str = self.comm_get_image_processor_inputs(
with_segmentation_maps=a_, is_instance_map=a_, segmentation_type=a_ )
_snake_case : str = inputs["""mask_labels"""]
_snake_case : str = inputs["""class_labels"""]
_snake_case : Optional[int] = inputs["""pixel_values"""]
_snake_case : int = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(a_, a_, a_ ):
self.assertEqual(mask_label.shape[0], class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] )
self.assertEqual(len(a_ ), self.image_processing_tester.num_text )
common()
common(is_instance_map=a_ )
common(is_instance_map=a_, segmentation_type="""pil""" )
common(is_instance_map=a_, segmentation_type="""pil""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = np.zeros((20, 50) )
_snake_case : Any = 1
_snake_case : Dict = 1
_snake_case : Tuple = 1
_snake_case : List[Any] = binary_mask_to_rle(a_ )
self.assertEqual(len(a_ ), 4 )
self.assertEqual(rle[0], 21 )
self.assertEqual(rle[1], 45 )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
_snake_case : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(a_ )
self.assertEqual(len(a_ ), self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape, (
self.image_processing_tester.height,
self.image_processing_tester.width,
), )
_snake_case : List[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_snake_case : Dict = image_processor.post_process_semantic_segmentation(a_, target_sizes=a_ )
self.assertEqual(segmentation[0].shape, target_sizes[0] )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
_snake_case : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : str = image_processor.post_process_instance_segmentation(a_, threshold=0 )
self.assertTrue(len(a_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ), a_ )
self.assertEqual(
el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
_snake_case : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Optional[Any] = image_processor.post_process_panoptic_segmentation(a_, threshold=0 )
self.assertTrue(len(a_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ), a_ )
self.assertEqual(
el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
| 64 |
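# The binary_mask_to_rle assertions above (first run at 1-indexed offset 21,
# length 45) are consistent with standard run-length encoding of the flattened
# mask. An illustrative reimplementation, not necessarily the library's exact
# code:
import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    pixels = mask.flatten()
    padded = np.concatenate([[0], pixels, [0]])        # pad so edge runs register
    runs = np.where(padded[1:] != padded[:-1])[0] + 1  # 1-indexed change points
    runs[1::2] -= runs[::2]                            # end positions -> lengths
    return runs.tolist()

mask = np.zeros((20, 50))
mask[0, 20:] = 1      # 30 ones ending row 0 ...
mask[1, :15] = 1      # ... plus 15 more at the start of row 1: one run of 45
assert binary_mask_to_rle_sketch(mask)[:2] == [21, 45]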
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    """simple docstring"""
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    """simple docstring"""
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(stooge_sort(unsorted))
| 64 | 1 |
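# A small self-check for the stooge sort above, comparing against Python's
# built-in sorted() on random inputs. Illustrative only; it assumes the
# stooge_sort name defined in the snippet above.
import random

def stooge_sort_check(trials: int = 100) -> None:
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert stooge_sort(list(data)) == sorted(data)

# stooge_sort_check()  # stooge sort is ~O(n^2.71), so keep inputs small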
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("math domain error" )
    return quad(integrand, 0, inf, args=(num,) )[0]
def integrand(x: float, z: float) -> float:
    '''simple docstring'''
    return math.pow(x, z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod() | 340 |
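# Quick sanity check for the integral definition above: for positive integers
# n, Gamma(n) == (n - 1)!, and math.gamma provides a reference value. Assumes
# the gamma() name from the snippet above; the tolerance is illustrative.
import math

for n in (1, 2, 3, 4, 5):
    approx = gamma(n)       # numerical quadrature result
    exact = math.gamma(n)   # closed-form reference, equals (n - 1)!
    assert abs(approx - exact) < 1e-6, (n, approx, exact)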
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    '''simple docstring'''
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1, num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
n = 50
print(f'''The first {n} digits of pi is: {pi(n)}''') | 340 | 1 |
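# Each Chudnovsky iteration contributes roughly 14 more correct digits, which
# is why pi() above runs ceil(precision / 14) terms. A loose sanity check
# against a well-known prefix of pi (illustrative only):
assert pi(50).startswith("3.14159265358979")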
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 22 |
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 1 |
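# The float-based check above (sqrt(n) * sqrt(n) == n) can misfire for large n
# because of rounding. math.isqrt (Python 3.8+) gives an exact integer test;
# the helper name below is illustrative.
import math

def is_perfect_square(n: int) -> bool:
    if n < 0:
        return False
    root = math.isqrt(n)  # floor of the exact integer square root
    return root * root == n

assert is_perfect_square(16) and not is_perfect_square(15)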
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class lowerCamelCase_ ( lowercase_ ):
lowerCAmelCase__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCAmelCase__ : int = deprecated_arg[3:]
UpperCAmelCase__ : str = not kwargs.pop(a__ )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
UpperCAmelCase__ : List[str] = kwargs.pop('''tpu_name''' , self.tpu_name )
UpperCAmelCase__ : Optional[int] = kwargs.pop('''device_idx''' , self.device_idx )
UpperCAmelCase__ : Optional[Any] = kwargs.pop('''eager_mode''' , self.eager_mode )
UpperCAmelCase__ : Tuple = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**a__ )
lowerCAmelCase__ = field(
default=lowercase_ , metadata={'help': 'Name of TPU'} , )
lowerCAmelCase__ = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
lowerCAmelCase__ = field(default=lowercase_ , metadata={'help': 'Benchmark models in eager model.'} )
lowerCAmelCase__ = field(
default=lowercase_ , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
UpperCAmelCase__ : Optional[int] = None
if self.tpu:
try:
if self.tpu_name:
UpperCAmelCase__ : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
UpperCAmelCase__ : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
UpperCAmelCase__ : List[Any] = None
return tpu
@cached_property
def lowercase_ ( self : Any ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
UpperCAmelCase__ : str = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
UpperCAmelCase__ : Optional[Any] = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
UpperCAmelCase__ : str = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.n_gpu > 0
| 354 |
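# The benchmark-args class above leans on @cached_property so that expensive
# device setup (TPU resolvers, distribution strategies) runs at most once per
# instance. A minimal standalone sketch of the same pattern; the class and
# attribute names here are illustrative, not part of the library.
from functools import cached_property

class LazyResource:
    @cached_property
    def connection(self) -> str:
        print("expensive setup runs exactly once")
        return "connected"

r = LazyResource()
assert r.connection == r.connection  # second access is served from the cache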
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class lowerCamelCase_ :
def __init__( self : List[Any] , _A : int | None = None ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = value
UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase__ : Node | None = None
UpperCAmelCase__ : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Node | None = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = root
def __str__( self : Union[str, Any] ):
'''simple docstring'''
return str(self.root )
def lowercase_ ( self : str , _A : Node , _A : Node | None ):
'''simple docstring'''
if new_children is not None: # reset its kids
UpperCAmelCase__ : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_A ): # If it is the right children
UpperCAmelCase__ : str = new_children
else:
UpperCAmelCase__ : Optional[int] = new_children
else:
UpperCAmelCase__ : Union[str, Any] = new_children
def lowercase_ ( self : Union[str, Any] , _A : Node ):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase_ ( self : int ):
'''simple docstring'''
return self.root is None
def lowercase_ ( self : List[str] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = Node(_A ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase__ : List[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase__ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase__ : Any = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase__ : str = new_node
break
else:
UpperCAmelCase__ : List[str] = parent_node.right
UpperCAmelCase__ : Tuple = parent_node
def lowercase_ ( self : Optional[Any] , *_A : Tuple ):
'''simple docstring'''
for value in values:
self.__insert(_A )
def lowercase_ ( self : Union[str, Any] , _A : int ):
'''simple docstring'''
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
UpperCAmelCase__ : List[Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase__ : str = node.left if value < node.value else node.right
return node
def lowercase_ ( self : List[Any] , _A : Node | None = None ):
'''simple docstring'''
if node is None:
if self.root is None:
return None
UpperCAmelCase__ : int = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase__ : Tuple = node.right
return node
def lowercase_ ( self : List[Any] , _A : Node | None = None ):
'''simple docstring'''
if node is None:
UpperCAmelCase__ : Optional[int] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase__ : Optional[int] = self.root
while node.left is not None:
UpperCAmelCase__ : Tuple = node.left
return node
def lowercase_ ( self : List[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_A , _A )
elif node.left is None: # Has only right children
self.__reassign_nodes(_A , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_A , node.left )
else:
UpperCAmelCase__ : Union[str, Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase__ : Optional[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase_ ( self : List[str] , _A : Node | None ):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase_ ( self : str , _A : Any=None ):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase_ ( self : Dict , _A : list , _A : Node | None ):
'''simple docstring'''
if node:
self.inorder(_A , node.left )
arr.append(node.value )
self.inorder(_A , node.right )
def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ):
'''simple docstring'''
UpperCAmelCase__ : list[int] = []
self.inorder(_A , _A ) # append all values to list using inorder traversal
return arr[k - 1]
def a__ ( lowerCAmelCase__ ) -> list[Node]:
UpperCAmelCase__ : Union[str, Any] = []
if curr_node is not None:
UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def a__ ( ) -> None:
UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase__ : str = BinarySearchTree()
for i in testlist:
t.insert(lowerCAmelCase__ )
# Prints all the elements of the list in order traversal
print(lowerCAmelCase__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(lowerCAmelCase__ )
print(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 299 | 0 |
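# The tree above recovers sorted order with recursive inorder traversal (see
# the inorder method and the k-th smallest lookup). The same traversal can be
# done with an explicit stack; this sketch only assumes nodes expose
# .value/.left/.right, as the class above does.
from types import SimpleNamespace

def inorder_iterative(root) -> list:
    result, stack, node = [], [], root
    while stack or node is not None:
        while node is not None:   # descend to the leftmost unvisited node
            stack.append(node)
            node = node.left
        node = stack.pop()        # visit it
        result.append(node.value)
        node = node.right         # then walk its right subtree
    return result

leaf = lambda v: SimpleNamespace(value=v, left=None, right=None)
root = SimpleNamespace(value=2, left=leaf(1), right=leaf(3))
assert inorder_iterative(root) == [1, 2, 3]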
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150 | 0 |
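# Both import blocks above use transformers' _LazyModule so heavy submodules
# load only when first accessed. Plain Python can express the same idea with
# a module-level __getattr__ (PEP 562); the mapping below is a hypothetical
# example for a package __init__.py, not transformers' actual mechanism.
import importlib

_LAZY = {"BloomModel": ".modeling_bloom"}  # attribute name -> submodule

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)  # import happens on first attribute access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")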
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : str = logging.get_logger(__name__)
__snake_case : Optional[int] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
__snake_case : str = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
__snake_case : List[str] = {
"""vinai/phobert-base""": 2_56,
"""vinai/phobert-large""": 2_56,
}
def _UpperCamelCase ( UpperCamelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = set()
lowerCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ = char
lowerCAmelCase__ = set(UpperCamelCase_ )
return pairs
class __SCREAMING_SNAKE_CASE ( __lowercase):
_SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , )
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = merges_file
lowerCAmelCase__ = {}
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 3
self.add_from_file(_UpperCamelCase )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
with open(_UpperCamelCase , encoding='utf-8' ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split('\n' )[:-1]
lowerCAmelCase__ = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase__ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
lowerCAmelCase__ = {}
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ):
"""simple docstring"""
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(_UpperCamelCase )
lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
lowerCAmelCase__ = get_pairs(_UpperCamelCase )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(_UpperCamelCase , key=lambda _UpperCamelCase : self.bpe_ranks.get(_UpperCamelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(_UpperCamelCase ):
try:
lowerCAmelCase__ = word.index(_UpperCamelCase , _UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(_UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(_UpperCamelCase )
lowerCAmelCase__ = new_word
if len(_UpperCamelCase ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(_UpperCamelCase )
lowerCAmelCase__ = '@@ '.join(_UpperCamelCase )
lowerCAmelCase__ = word[:-4]
lowerCAmelCase__ = word
return word
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = re.findall(r'\S+\n?' , _UpperCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(_UpperCamelCase ).split(' ' ) ) )
return split_tokens
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return self.decoder.get(_UpperCamelCase , self.unk_token )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = ' '.join(_UpperCamelCase ).replace('@@ ' , '' ).strip()
return out_string
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.vocab_file , _UpperCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.merges_file , _UpperCamelCase )
return out_vocab_file, out_merge_file
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
if isinstance(_UpperCamelCase , _UpperCamelCase ):
try:
with open(_UpperCamelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(_UpperCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
lowerCAmelCase__ = f.readlines()
for lineTmp in lines:
lowerCAmelCase__ = lineTmp.strip()
lowerCAmelCase__ = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
lowerCAmelCase__ = line[:idx]
lowerCAmelCase__ = len(self.encoder )
| 122 |
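# The bpe() method above repeatedly merges the lowest-ranked adjacent symbol
# pair until no ranked pair remains. A toy standalone version of that loop;
# the rank table is hand-made for illustration, not PhoBERT's real merges.
def bpe_merge(word: tuple, ranks: dict) -> tuple:
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        candidates = [p for p in pairs if p in ranks]
        if not candidates:
            return word
        first, second = min(candidates, key=ranks.get)  # best-ranked pair
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)

ranks = {("l", "o"): 0, ("lo", "w"): 1}
assert bpe_merge(("l", "o", "w"), ranks) == ("low",)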
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__snake_case : Optional[Any] = TypeVar("""KEY""")
__snake_case : str = TypeVar("""VAL""")
@dataclass(frozen=__lowercase , slots=__lowercase)
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL]):
_SCREAMING_SNAKE_CASE : KEY
_SCREAMING_SNAKE_CASE : VAL
class __SCREAMING_SNAKE_CASE ( _Item):
def __init__( self ):
"""simple docstring"""
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __bool__( self ):
"""simple docstring"""
return False
__snake_case : int = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL]):
def __init__( self , _UpperCamelCase = 8 , _UpperCamelCase = 0.75 ):
"""simple docstring"""
lowerCAmelCase__ = initial_block_size
lowerCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ = capacity_factor
lowerCAmelCase__ = 0
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return hash(_UpperCamelCase ) % len(self._buckets )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self._buckets[ind]
if not stored:
lowerCAmelCase__ = _Item(_UpperCamelCase , _UpperCamelCase )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ = _Item(_UpperCamelCase , _UpperCamelCase )
return True
else:
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self._buckets
lowerCAmelCase__ = [None] * new_size
lowerCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self._get_bucket_index(_UpperCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ = self._get_next_ind(_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
if self._try_set(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
break
def __setitem__( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(_UpperCamelCase , _UpperCamelCase )
def __delitem__( self , _UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(_UpperCamelCase )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , _UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_UpperCamelCase )
def __len__( self ):
"""simple docstring"""
return self._len
def __iter__( self ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
"""simple docstring"""
lowerCAmelCase__ = ' ,'.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 122 | 1 |
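# Usage sketch for the open-addressing map above. The class name is anonymized
# in the snippet, but its __repr__ suggests the original name HashMap, which
# this sketch assumes (constructed positionally since the parameter names are
# anonymized too). Deletions leave a _deleted tombstone so probe chains stay
# intact, and the table grows once it crosses the 0.75 capacity factor.
hm = HashMap(8)
for k in range(10):            # crossing 8 * 0.75 = 6 live entries triggers _size_up
    hm[f"key{k}"] = k
del hm["key3"]                 # tombstone, not a hole
assert "key3" not in hm and hm["key4"] == 4 and len(hm) == 9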
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ : Tuple = 16
UpperCAmelCase_ : List[str] = 32
def _A (__a , __a = 16 ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE_ : Dict = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ : Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_ : Any = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_ : int = 8
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
return tokenizer.pad(
__a , padding='''longest''' , max_length=__a , pad_to_multiple_of=__a , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ : Optional[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
SCREAMING_SNAKE_CASE_ : int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ : int = mocked_dataloaders # noqa: F811
def _A (__a , __a ) -> Optional[int]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __a ) == "1":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
SCREAMING_SNAKE_CASE_ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ : Any = config['''lr''']
SCREAMING_SNAKE_CASE_ : int = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(config['''seed'''] )
SCREAMING_SNAKE_CASE_ : Tuple = int(config['''batch_size'''] )
set_seed(__a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = get_dataloaders(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE_ : Optional[Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ : List[Any] = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_ : int = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=1_00 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.prepare(
__a , __a , __a , __a , __a )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.split(__a )[-1].split('''.''' )[0]
accelerator.init_trackers(__a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
SCREAMING_SNAKE_CASE_ : Tuple = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_ : List[str] = model(**__a )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
SCREAMING_SNAKE_CASE_ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**__a )
SCREAMING_SNAKE_CASE_ : str = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__a , references=__a , )
SCREAMING_SNAKE_CASE_ : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , __a )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(__a ),
'''epoch''': epoch,
} , step=__a , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__a , default=__a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=__a , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
SCREAMING_SNAKE_CASE_ : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main()
| 91 |
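# The loop above divides each loss by gradient_accumulation_steps and only
# steps the optimizer every N micro-batches, keeping the effective batch size
# constant when the per-device batch must shrink. A framework-free PyTorch
# sketch of that pattern (the model and data here are toy stand-ins):
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data = [(torch.randn(2, 4), torch.randn(2, 1)) for _ in range(8)]
accumulation_steps = 4

optimizer.zero_grad()
for step, (x, y) in enumerate(data):
    loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
    loss.backward()                      # gradients accumulate in .grad
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()                 # one update per 4 micro-batches
        optimizer.zero_grad()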
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def _A (__a , __a ) -> Tuple:
"""simple docstring"""
try:
with open(__a , '''rb''' ) as flax_state_f:
SCREAMING_SNAKE_CASE_ : Optional[int] = from_bytes(__a , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__a ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(__a , __a )
def _A (__a , __a ) -> Tuple:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , __a ) ).values()
if any(__a ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.tree_util.tree_map(
        lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , __a )
SCREAMING_SNAKE_CASE_ : int = ''''''
SCREAMING_SNAKE_CASE_ : str = flatten_dict(__a , sep='''.''' )
SCREAMING_SNAKE_CASE_ : List[Any] = pt_model.state_dict()
# keep track of unexpected & missing keys
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple_array[:-1] + ['''weight''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.transpose(__a , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple_array[:-1] + ['''weight''']
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__a ):
SCREAMING_SNAKE_CASE_ : List[str] = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''.'''.join(__a )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(__a )
# remove from missing keys
missing_keys.remove(__a )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__a )
pt_model.load_state_dict(__a )
# re-transform missing_keys to list
SCREAMING_SNAKE_CASE_ : int = list(__a )
if len(__a ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(__a ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
return pt_model
| 91 | 1 |
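# The conversion above permutes Flax conv kernels from HWIO layout
# (height, width, in_channels, out_channels) into PyTorch's OIHW via the
# (3, 2, 0, 1) transpose. The same permutation in plain numpy:
import numpy as np

flax_kernel = np.zeros((3, 3, 16, 32))               # H, W, I, O
pt_weight = np.transpose(flax_kernel, (3, 2, 0, 1))  # O, I, H, W
assert pt_weight.shape == (32, 16, 3, 3)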
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """convbert"""
def __init__( self , __magic_name__=3_05_22 , __magic_name__=7_68 , __magic_name__=12 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=7_68 , __magic_name__=2 , __magic_name__=9 , __magic_name__=1 , __magic_name__=None , **__magic_name__ , ) -> Optional[Any]:
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = embedding_size
_a = head_ratio
_a = conv_kernel_size
_a = num_groups
_a = classifier_dropout
class a ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_a = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 356 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 | 0 |
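# The loop above is the Fibonacci recurrence ways(n) = ways(n - 1) + ways(n - 2)
# for climbing stairs in 1- or 2-step moves. First few values as a check,
# assuming the climb_stairs name from the snippet above:
for steps, expected in [(1, 1), (2, 2), (3, 3), (4, 5), (5, 8)]:
    assert climb_stairs(steps) == expected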
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> np.ndarray:
__a = cva.getAffineTransform(a__ , a__ )
return cva.warpAffine(a__ , a__ , (rows, cols) )
if __name__ == "__main__":
# read original image
A : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A : Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A , A : List[Any] = gray_img.shape
# set different points to rotate image
A : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
A : Tuple = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
A : Union[str, Any] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
A : Any = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
A : Optional[int] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A : Optional[int] = plt.figure(1)
A : Tuple = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show() | 6 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__UpperCamelCase = HfApi()
__UpperCamelCase = {}
# fmt: off
__UpperCamelCase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
__UpperCamelCase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
__UpperCamelCase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
__UpperCamelCase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
__UpperCamelCase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
__UpperCamelCase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
__UpperCamelCase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
__UpperCamelCase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
__UpperCamelCase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
__UpperCamelCase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
__UpperCamelCase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
__UpperCamelCase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
__UpperCamelCase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
__UpperCamelCase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
__UpperCamelCase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
__UpperCamelCase = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__UpperCamelCase = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
__UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__UpperCamelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__UpperCamelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__UpperCamelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'''{mod.modelId} has passed successfully!!!''')
| 113 | 0 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=13 , SCREAMING_SNAKE_CASE : int=7 , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Optional[Any]=99 , SCREAMING_SNAKE_CASE : Dict=32 , SCREAMING_SNAKE_CASE : Tuple=5 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Tuple=64 , SCREAMING_SNAKE_CASE : str="gelu" , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=512 , SCREAMING_SNAKE_CASE : Union[str, Any]=16 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[str]=1 , ):
_A : int = parent
_A : Union[str, Any] = batch_size
_A : Optional[Any] = seq_length
_A : Optional[int] = is_training
_A : Optional[int] = use_input_mask
_A : List[Any] = use_token_type_ids
_A : Union[str, Any] = use_labels
_A : List[str] = vocab_size
_A : str = hidden_size
_A : Tuple = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : Dict = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : List[Any] = max_position_embeddings
_A : Optional[Any] = type_vocab_size
_A : List[Any] = type_sequence_label_size
_A : Union[str, Any] = initializer_range
_A : Optional[Any] = num_labels
_A : Dict = num_choices
_A : Dict = scope
_A : List[str] = q_groups
_A : Optional[int] = k_groups
_A : Tuple = v_groups
_A : Optional[int] = post_attention_groups
_A : Optional[Any] = intermediate_groups
_A : str = output_groups
def A ( self : Optional[int]):
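        # Build one random test batch: token ids, an optional attention mask, and
        # (when use_labels is set) sequence-, token- and choice-level label tensors.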
_A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_A : Tuple = None
if self.use_input_mask:
_A : Any = random_attention_mask([self.batch_size, self.seq_length])
_A : Union[str, Any] = None
_A : List[str] = None
_A : Tuple = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_A : int = ids_tensor([self.batch_size] , self.num_choices)
_A : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Any):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict):
_A : Tuple = SqueezeBertModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : str = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str]):
_A : List[str] = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]):
_A : Any = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : int = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str):
_A : List[Any] = self.num_labels
_A : Tuple = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int]):
_A : Dict = self.num_labels
_A : int = SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A ( self : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str]):
_A : str = self.num_choices
_A : List[str] = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Tuple = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_A : Any = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_A : Dict = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A ( self : Tuple):
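        # Repackage the prepared config and inputs into the (config, inputs_dict) pair
        # that the common test mixin expects.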
_A : List[str] = self.prepare_config_and_inputs()
((_A) , (_A) , (_A) , (_A) , (_A) , (_A)) : Optional[Any] = config_and_inputs
_A : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
a = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a = False
a = True
a = False
def A ( self : Union[str, Any]):
_A : Union[str, Any] = SqueezeBertModelTester(self)
_A : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , dim=37)
def A ( self : List[str]):
self.config_tester.run_common_tests()
def A ( self : int):
_A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE)
def A ( self : Dict):
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE)
def A ( self : Optional[int]):
_A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE)
def A ( self : Tuple):
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE)
def A ( self : Optional[int]):
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE)
def A ( self : str):
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE)
@slow
def A ( self : str):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Dict = SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Union[str, Any]):
_A : List[Any] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
_A : Dict = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
_A : List[Any] = model(SCREAMING_SNAKE_CASE)[0]
_A : Tuple = torch.Size((1, 3))
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE)
_A : List[Any] = torch.tensor([[0.6401, -0.0349, -0.6041]])
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-4))
| 227 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A : int = 2
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : List[str] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE : int="<pad>" , SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE : Tuple="<unk>" , SCREAMING_SNAKE_CASE : List[Any]=None , ):
_A , _A , _A , _A : Any = bos, unk, pad, eos
_A : Optional[Any] = []
_A : Optional[Any] = []
_A : Optional[int] = {}
_A : Dict = self.add_symbol(SCREAMING_SNAKE_CASE)
_A : List[str] = self.add_symbol(SCREAMING_SNAKE_CASE)
_A : str = self.add_symbol(SCREAMING_SNAKE_CASE)
_A : Any = self.add_symbol(SCREAMING_SNAKE_CASE)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE)
_A : List[str] = len(self.symbols)
def __eq__( self : int , SCREAMING_SNAKE_CASE : Optional[Any]):
return self.indices == other.indices
def __getitem__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__( self : Union[str, Any]):
return len(self.symbols)
def __contains__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]):
return sym in self.indices
@classmethod
def A ( cls : Dict , SCREAMING_SNAKE_CASE : Optional[Any]):
_A : Any = cls()
d.add_from_file(SCREAMING_SNAKE_CASE)
return d
def A ( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=1 , SCREAMING_SNAKE_CASE : int=False):
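        # add_symbol: return the symbol's index; if the word already exists (and overwrite is
        # False) just bump its count, otherwise append it as a new vocabulary entry.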
if word in self.indices and not overwrite:
_A : str = self.indices[word]
_A : List[str] = self.count[idx] + n
return idx
else:
_A : Optional[Any] = len(self.symbols)
_A : Union[str, Any] = idx
self.symbols.append(SCREAMING_SNAKE_CASE)
self.count.append(SCREAMING_SNAKE_CASE)
return idx
def A ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int]):
return 0
def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]):
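        # add_from_file: accept either a path or an open file object and load
        # "<token> <count> [#fairseq:overwrite]" lines into the dictionary.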
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
try:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8') as fd:
self.add_from_file(SCREAMING_SNAKE_CASE)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE))
return
_A : Union[str, Any] = f.readlines()
_A : Any = self._load_meta(SCREAMING_SNAKE_CASE)
for line in lines[indices_start_line:]:
try:
_A , _A : List[str] = line.rstrip().rsplit(' ' , 1)
if field == "#fairseq:overwrite":
_A : int = True
_A , _A : List[str] = line.rsplit(' ' , 1)
else:
_A : Union[str, Any] = False
_A : List[str] = int(SCREAMING_SNAKE_CASE)
_A : Optional[Any] = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE))
self.add_symbol(SCREAMING_SNAKE_CASE , n=SCREAMING_SNAKE_CASE , overwrite=SCREAMING_SNAKE_CASE)
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'')
def lowerCAmelCase__ ( lowerCamelCase : Optional[int] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
_A : Union[str, Any] = dict((re.sub(R'@@$' ,'' ,lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' ,'</w>' ,lowerCamelCase ), v) for k, v in d.items() )
_A : Optional[Any] = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
_A : str = d[k] # restore
return da
def lowerCAmelCase__ ( lowerCamelCase : List[str] ,lowerCamelCase : List[str] ):
# prep
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase ,exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
_A : Dict = os.path.join(lowerCamelCase ,'checkpoint.pt' )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
_A : int = torch.load(lowerCamelCase ,map_location='cpu' )
_A : Dict = chkpt['cfg']['model']
# dicts
_A : Any = os.path.join(lowerCamelCase ,'dict.txt' )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
_A : Any = Dictionary.load(lowerCamelCase )
_A : Optional[int] = rewrite_dict_keys(src_dict.indices )
_A : List[Any] = len(lowerCamelCase )
_A : str = os.path.join(lowerCamelCase ,VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) )
# merges_file (bpecodes)
_A : Optional[int] = os.path.join(lowerCamelCase ,'bpecodes' )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
_A : Dict = os.path.join(lowerCamelCase ,VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowerCamelCase ,lowerCamelCase )
# model config
_A : str = os.path.join(lowerCamelCase ,'config.json' )
_A : int = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) )
# tokenizer config
_A : Union[str, Any] = os.path.join(lowerCamelCase ,lowerCamelCase )
_A : Any = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) )
# model
_A : List[Any] = chkpt['model']
# remove unneeded keys
_A : int = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase ,lowerCamelCase )
_A : Any = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_A : str = model_state_dict.pop(lowerCamelCase )
else:
_A : Dict = model_state_dict.pop(lowerCamelCase )
_A : Any = BioGptConfig.from_pretrained(lowerCamelCase )
_A : Union[str, Any] = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
_A : Union[str, Any] = os.path.join(lowerCamelCase ,lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase ,lowerCamelCase )
print('Conversion is done!' )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A : int = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 227 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = '''Salesforce/blip-image-captioning-base'''
UpperCAmelCase : Tuple = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
UpperCAmelCase : List[str] = '''image_captioner'''
UpperCAmelCase : Optional[int] = AutoModelForVisionaSeq
UpperCAmelCase : str = ['''image''']
UpperCAmelCase : str = ['''text''']
def __init__( self : str , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[Any] ):
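        # Fail fast when the vision extra (PIL) is unavailable, before any pipeline setup.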
requires_backends(self , ['vision'] )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : "Image" ):
return self.pre_processor(images=_UpperCAmelCase , return_tensors='pt' )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Any ):
return self.model.generate(**_UpperCAmelCase )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Tuple ):
return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0].strip()
| 315 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[Any] ):
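        # Point the converter's save_dir at a throwaway temp directory so test artifacts never land in the repo.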
_A = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
self.resolver.convert_models(['heb-eng'] )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase )
assert mmeta["long_pair"] == "heb-eng"
| 315 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a_ :
'''simple docstring'''
@staticmethod
def a__ (*lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
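        # Build an ObjectDetectionPipeline from the given tiny model/processor pair and
        # return it together with one sample COCO image path.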
lowerCamelCase__ : Optional[int] = ObjectDetectionPipeline(model=lowerCamelCase_, image_processor=lowerCamelCase_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0 )
self.assertGreater(len(lowerCamelCase_ ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase_, {
'score': ANY(lowerCamelCase_ ),
'label': ANY(lowerCamelCase_ ),
'box': {'xmin': ANY(lowerCamelCase_ ), 'ymin': ANY(lowerCamelCase_ ), 'xmax': ANY(lowerCamelCase_ ), 'ymax': ANY(lowerCamelCase_ )},
}, )
import datasets
lowerCamelCase__ : Any = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test' )
lowerCamelCase__ : Tuple = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
lowerCamelCase__ : Optional[int] = object_detector(lowerCamelCase_, threshold=0.0 )
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCamelCase_ ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase_, {
'score': ANY(lowerCamelCase_ ),
'label': ANY(lowerCamelCase_ ),
'box': {'xmin': ANY(lowerCamelCase_ ), 'ymin': ANY(lowerCamelCase_ ), 'xmax': ANY(lowerCamelCase_ ), 'ymax': ANY(lowerCamelCase_ )},
}, )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def a__ (self ):
'''simple docstring'''
pass
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/tiny-detr-mobilenetsv3'
lowerCamelCase__ : int = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = ObjectDetectionPipeline(model=lowerCamelCase_, feature_extractor=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
], )
lowerCamelCase__ : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
], threshold=0.0, )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = 'facebook/detr-resnet-50'
lowerCamelCase__ : List[Any] = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = ObjectDetectionPipeline(model=lowerCamelCase_, feature_extractor=lowerCamelCase_ )
lowerCamelCase__ : int = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
lowerCamelCase__ : Any = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 'facebook/detr-resnet-50'
lowerCamelCase__ : List[Any] = pipeline('object-detection', model=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
lowerCamelCase__ : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 0.9_985
lowerCamelCase__ : str = 'facebook/detr-resnet-50'
lowerCamelCase__ : str = pipeline('object-detection', model=lowerCamelCase_ )
lowerCamelCase__ : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
@require_torch
@require_pytesseract
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'Narsil/layoutlmv3-finetuned-funsd'
lowerCamelCase__ : Any = 0.9_993
lowerCamelCase__ : List[Any] = pipeline('object-detection', model=lowerCamelCase_, threshold=lowerCamelCase_ )
lowerCamelCase__ : str = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
], )
| 316 |
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
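    # Complement each base via str.translate (A<->T, C<->G); any character outside ATCG raises ValueError.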
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any=None ):
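    # Fetch every job of a workflow run from the GitHub API (paginating past the first
    # 100 results) and return {job_name: html_url}.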
__a : Tuple = None
if token is not None:
__a : int = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
__a : Dict = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
__a : List[str] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
__a : Any = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
__a : Optional[int] = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
__a : int = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int=None ):
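    # Same pagination scheme as the job query above, but collect
    # {artifact_name: archive_download_url} for the run's artifacts.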
__a : List[str] = None
if token is not None:
__a : Union[str, Any] = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
__a : Union[str, Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
__a : Dict = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
__a : Union[str, Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
__a : int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
__a : Any = requests.get(url + F"""&page={i + 2}""" , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple ):
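    # Follow GitHub's redirect for the artifact archive and save it to <output_dir>/<artifact_name>.zip.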
__a : List[Any] = None
if token is not None:
__a : int = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
__a : str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
__a : Optional[int] = result.headers['Location']
__a : Optional[Any] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
__a : List[str] = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
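    # Parse one report zip: pair each error line from failures_line.txt with the failing
    # test from summary_short.txt, and attach the job link (via job_name.txt) when available.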
__a : List[str] = []
__a : int = []
__a : int = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
__a : List[Any] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__a : Tuple = line[: line.index(': ' )]
__a : Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
__a : Tuple = line[len('FAILED ' ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
__a : List[str] = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` """
F"""and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
' problem.' )
__a : Optional[Any] = None
if job_name and job_links:
__a : Tuple = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
__a : str = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int=None ):
__a : Optional[int] = []
__a : str = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any=None ):
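    # Group the logs by error message and return
    # {error: {"count": n, "failed_tests": [(job_link, error_line), ...]}}, most frequent first.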
__a : List[Any] = Counter()
counter.update([x[1] for x in logs] )
__a : Dict = counter.most_common()
__a : str = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__a : Union[str, Any] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
__a : List[str] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
__a : Any = test.split('::' )[0]
if test.startswith('tests/models/' ):
__a : Tuple = test.split('/' )[2]
else:
__a : Union[str, Any] = None
return test
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
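    # Group the errors per model (derived from the failing test's path) and sort models
    # by their total error count.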
__a : Optional[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
__a : Dict = [x for x in logs if x[2] is not None]
__a : Any = {x[2] for x in logs}
__a : Optional[int] = {}
for test in tests:
__a : Optional[Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__a : int = counter.most_common()
__a : Dict = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__a : List[str] = sum(error_counts.values() )
if n_errors > 0:
__a : Optional[Any] = {'count': n_errors, 'errors': error_counts}
__a : Optional[Any] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
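    # Render the per-error summary as a GitHub-flavored Markdown table with columns no. / error / status.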
__a : Any = '| no. | error | status |'
__a : Optional[int] = '|-:|:-|:-|'
__a : Any = [header, sep]
for error in reduced_by_error:
__a : Union[str, Any] = reduced_by_error[error]['count']
__a : Union[str, Any] = F"""| {count} | {error[:100]} | |"""
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : Union[str, Any] = '| model | no. of errors | major error | count |'
__a : Union[str, Any] = '|-:|-:|-:|-:|'
__a : Any = [header, sep]
for model in reduced_by_model:
__a : Optional[int] = reduced_by_model[model]['count']
__a , __a : Any = list(reduced_by_model[model]['errors'].items() )[0]
__a : Optional[int] = F"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__lowercase : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__lowercase : Optional[Any] = get_job_links(args.workflow_run_id, token=args.token)
__lowercase : str = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__lowercase : List[Any] = k.find(' / ')
__lowercase : Tuple = k[index + len(' / ') :]
__lowercase : Union[str, Any] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__lowercase : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__lowercase : Union[str, Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__lowercase : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__lowercase : str = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__lowercase : str = reduce_by_error(errors)
__lowercase : int = reduce_by_model(errors)
__lowercase : Optional[int] = make_github_table(reduced_by_error)
__lowercase : Tuple = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 27 |
'''simple docstring'''
import os
import sys
__lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
__lowercase : int = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[Any] ):
return AutoConfig.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoTokenizer.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ):
return AutoTokenizer.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModel.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
return AutoModel.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[int] ):
return AutoModelForCausalLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] ):
return AutoModelForMaskedLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any ):
return AutoModelForSequenceClassification.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : List[str] ):
return AutoModelForQuestionAnswering.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A__ :
"""simple docstring"""
UpperCamelCase_ : Any = XGLMConfig
UpperCamelCase_ : Union[str, Any] = {}
UpperCamelCase_ : Dict = '''gelu'''
def __init__( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=1_4 , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=9_9 , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Any=3_7 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : Optional[Any]=0.02 , ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : str = batch_size
_UpperCAmelCase : str = seq_length
_UpperCAmelCase : int = is_training
_UpperCAmelCase : List[Any] = use_input_mask
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : int = d_model
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Tuple = ffn_dim
_UpperCAmelCase : Any = activation_function
_UpperCAmelCase : Union[str, Any] = activation_dropout
_UpperCAmelCase : Union[str, Any] = attention_dropout
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : Any = None
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 2
_UpperCAmelCase : Tuple = 1
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
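        # Random input ids clipped into a tiny range, an optional attention mask, and a
        # per-layer, per-head mask for the test batch.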
"""simple docstring"""
_UpperCAmelCase : int = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_UpperCAmelCase : Any = None
if self.use_input_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Optional[Any] = self.get_config()
_UpperCAmelCase : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , )
def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
_UpperCAmelCase
) : List[Any] = config_and_inputs
_UpperCAmelCase : Optional[int] = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase_ : Any = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase_ : Tuple = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase_ : Dict = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Tuple = False
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_UpperCAmelCase : Dict = TFXGLMModelTester(self )
_UpperCAmelCase : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=3_7 )
def _lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[int] = TFXGLMModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def _lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any]=True ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
_UpperCAmelCase : Any = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_UpperCAmelCase : int = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
_UpperCAmelCase : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ )
@slow
def _lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
_UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
_UpperCAmelCase : Any = tokenizer("Today is a nice day and" , return_tensors="tf" )
_UpperCAmelCase : int = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)

with tf.device(":/CPU:0" ):
_UpperCAmelCase : List[Any] = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] )
_UpperCAmelCase : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
_UpperCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
_UpperCAmelCase : Optional[int] = "left"
# use different length sentences to test batching
_UpperCAmelCase : Tuple = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
_UpperCAmelCase : Dict = tokenizer(lowerCAmelCase__ , return_tensors="tf" , padding=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = inputs["input_ids"]
_UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 )
_UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 )
_UpperCAmelCase : Optional[int] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_UpperCAmelCase : List[Any] = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 )
_UpperCAmelCase : List[str] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
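# The entries below are appended to this lazy-import table only when their optional
# backends (tokenizers, vision, torch) are importable.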
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['LayoutLMv2FeatureExtractor']
__a = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 17 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['audio_values', 'audio_mask']

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ) -> None:
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: bool = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 3 |
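# The patch-mask arithmetic in __call__ above rounds each spectrogram up to a
# whole number of time patches before padding. A self-contained sketch of that
# bookkeeping (all sizes below are arbitrary illustrative values):
import numpy as np
from math import ceil

freq_len, patch_time = 8, 16   # 8 patches along frequency, 16 frames per time patch
lengths = [100, 37]            # time frames of two clips

patches = [ceil(n / patch_time) * freq_len for n in lengths]   # [56, 24]
max_patch_len = max(patches)                                   # 56

# attention mask: 1 for real patches, 0 for padding
audio_mask = np.array([[1] * p + [0] * (max_patch_len - p) for p in patches], dtype=np.float32)
print(audio_mask.shape)  # (2, 56)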
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 3 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data['repo_name'], data['path']), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2['copies'] += 1
                break
        else:
            element1['copies'] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element['is_extreme']:
                element['copies'] = extreme_dict[element['base_index']]['copies']

    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')

    return ds_filter, duplicate_clusters | 359 |
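# Sanity check for the MinHash-based similarity used above: datasketch's
# MinHash estimates the Jaccard similarity of two token sets. The strings
# below are arbitrary.
from datasketch import MinHash


def minhash_of(text, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode())
    return m


a = minhash_of('def add(a, b): return a + b')
b = minhash_of('def add(x, y): return x + y')
print(a.jaccard(b))  # estimated Jaccard similarity in [0, 1]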
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682 | 306 | 0 |
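# Tracing the recurrence above for a 5-pence target shows why iterating coins
# in the outer loop counts each *combination* exactly once:
#
#   start:    ways = [1, 0, 0, 0, 0, 0]
#   coin 1:   ways = [1, 1, 1, 1, 1, 1]   (only 1s)
#   coin 2:   ways = [1, 1, 2, 2, 3, 3]   (e.g. 4 = 1+1+1+1 = 1+1+2 = 2+2)
#   coin 5:   ways = [1, 1, 2, 2, 3, 4]   (adds the single 5p coin)
#
assert solution(5) == 4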
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] = None, size: int = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
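# Quick usage sketch for the Fenwick tree above (values are arbitrary);
# prefix(r) sums indices [0, r) and add() is an O(log n) point update.
f = FenwickTree([1, 2, 3, 4, 5])
print(f.prefix(3))    # 1 + 2 + 3 = 6
print(f.query(1, 4))  # 2 + 3 + 4 = 9
f.add(2, 10)          # array becomes [1, 2, 13, 4, 5]
print(f.get(2))       # 13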
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
| 3 | 1 |
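# The up blocks above pop one skip tensor per resnet and concatenate it on the
# channel axis (Flax activations are NHWC, hence axis=-1). A shape-only sketch
# with made-up sizes:
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 16, 16, 320))      # decoder activations
res_hidden_states = jnp.zeros((1, 16, 16, 640))  # matching skip from the down path
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(merged.shape)  # (1, 16, 16, 960) -- the resnet expects this widened channel count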
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]

__version__ = '3.0.12'


_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()

                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn('only soft file lock is available')
| 352 |
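# Typical use of the classes above goes through the platform-appropriate
# FileLock alias as a context manager; the paths here are illustrative.
lock = FileLock('/tmp/my_resource.txt.lock', timeout=5)

try:
    with lock:  # blocks for up to 5 seconds, then raises Timeout
        with open('/tmp/my_resource.txt', 'a') as f:
            f.write('exclusive write\n')
except Timeout:
    print('another process is holding the lock')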
"""simple docstring"""
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + '/grid.txt') as file:
        for line in file:
            grid.append(line.strip('\n').split(' '))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 155 | 0 |
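# A quick check of largest_product on a hand-made 4x4 grid (values arbitrary);
# the best run of four here is the main diagonal.
tiny = [
    [2, 1, 1, 1],
    [1, 3, 1, 1],
    [1, 1, 4, 1],
    [1, 1, 1, 5],
]
assert largest_product(tiny) == 2 * 3 * 4 * 5  # 120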
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
class __snake_case ( metaclass=__lowerCAmelCase ):
a__ = ["""sentencepiece"""]
def __init__( self , *lowercase , **lowercase) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'])
| 290 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'andreasmadsen/efficient_mlm_m0.40': (
        'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = 'roberta-prelayernorm'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 290 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 364 |
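# The parser above backs the `accelerate test` subcommand, so a typical
# invocation looks like (the config path is illustrative):
#
#     accelerate test --config_file path/to/default_config.yaml
#
# which launches test_utils/scripts/test_script.py through `accelerate-launch`
# with that config, as test_command() does above.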
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='pt').input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base')
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question,
        return_tensors='pt',
        padding='max_length',
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 152 | 0 |
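# The conversion script's core trick is a pure key-rename over the state dict;
# a tiny illustration of the same pattern with a made-up mapping rule:
import re


def demo_rename(key):
    # toy rule: 'blocks' -> 'layers' (mirrors one of the rules above)
    return re.sub(r'blocks', 'layers', key)


state_dict = {'encoder.blocks.0.weight': 1, 'encoder.blocks.1.weight': 2}
renamed = {demo_rename(k): v for k, v in state_dict.items()}
print(renamed)  # {'encoder.layers.0.weight': 1, 'encoder.layers.1.weight': 2}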
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ), reference_urls=["https://huggingface.co/docs/transformers/perplexity"], )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # mean negative log-likelihood per sequence, exponentiated to get perplexity
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
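# --- Usage sketch (editorial addition; hedged) --------------------------------
# A minimal way to exercise the metric above through the `datasets` API. The
# model id "distilgpt2" is only an illustrative assumption; any causal LM with
# a BOS token works when add_start_token=True.
if __name__ == "__main__":
    import datasets

    perplexity = datasets.load_metric("perplexity")
    results = perplexity.compute(
        input_texts=["The quick brown fox jumps over the lazy dog."],
        model_id="distilgpt2",  # assumed model id, chosen for illustration
        batch_size=1,
    )
    print(results["mean_perplexity"])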
| 17 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because BeiT's forward signature takes pixel_values, not input_ids
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
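# --- How to run (editorial addition; hedged) ----------------------------------
# Assuming a transformers source checkout with the flax extras installed, the
# tests above are collected with pytest; the file path below is an assumption
# based on the class names:
#
#   pip install "transformers[flax,testing]" Pillow
#   python -m pytest tests/models/beit/test_modeling_flax_beit.py -k "not slow"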
| 142 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that carry no explicit marker as unit tests.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every HF cache location into pytest's temporary directory so
    # tests never touch (or pollute) the user's real cache.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
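# --- Note (editorial addition; hedged) -----------------------------------------
# Because `set_test_cache_config` is autouse, every test transparently reads and
# writes HF caches under pytest's base temp dir, and the collection hook above
# lets you select the default-marked tests with, e.g.:
#
#   python -m pytest tests/ -m unit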
| 368 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 137 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
| 240 |
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    """Logistic (sigmoid) function, used as the hypothesis in classification."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy cost of predictions h against labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    """Fit the weight vector theta by batch gradient descent; alpha is the learning rate."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e. our weights vector

    def predict_prob(features):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(features, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
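    # --- Sanity check (editorial addition; hedged) ---------------------------
    # Compare the hand-rolled weights with scikit-learn's solver on the same
    # two features. Regularization is effectively disabled via a huge C and no
    # intercept is fitted, so the coefficients should land in the same region,
    # though they will not match exactly.
    from sklearn.linear_model import LogisticRegression

    sk_model = LogisticRegression(fit_intercept=False, C=1e12, max_iter=10_000).fit(x, y)
    print("sklearn theta:", sk_model.coef_.ravel())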
| 150 | 0 |
"""simple docstring"""
from collections.abc import Callable
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = a
__lowerCAmelCase = b
if function(_UpperCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCamelCase ) == 0:
return b
elif (
function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
__lowerCAmelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_UpperCamelCase ) == 0:
return mid
elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
__lowerCAmelCase = mid
else:
__lowerCAmelCase = mid
__lowerCAmelCase = start + (end - start) / 2.0
return mid
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
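# --- Another usage sketch (editorial addition; hedged) -------------------------
# Any continuous function with a sign change over [a, b] works; for example,
# the positive root of x**2 - 2 recovers sqrt(2) to about 1e-7:
#
#   >>> round(bisection(lambda x: x**2 - 2, 1, 2), 6)
#   1.414214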
| 360 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = {}
__lowerCAmelCase = 2
while True:
__lowerCAmelCase = factor_map.pop(_UpperCamelCase , _UpperCamelCase )
if factor:
__lowerCAmelCase = factor + prime
while x in factor_map:
x += factor
__lowerCAmelCase = factor
else:
__lowerCAmelCase = prime
yield prime
prime += 1
def _lowerCamelCase ( _UpperCamelCase = 1e10 ):
'''simple docstring'''
__lowerCAmelCase = sieve()
__lowerCAmelCase = 1
while True:
__lowerCAmelCase = next(_UpperCamelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(_UpperCamelCase )
n += 2
if __name__ == "__main__":
print(solution())
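# --- Quick check (editorial addition; hedged) ----------------------------------
# The incremental sieve yields primes in ascending order:
#
#   >>> from itertools import islice
#   >>> list(islice(sieve(), 8))
#   [2, 3, 5, 7, 11, 13, 17, 19]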
| 259 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that resizes the shortest edge, center-crops, rescales and normalizes images,
    and can post-process semantic segmentation logits.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model logits into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
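# --- Usage sketch (editorial addition; hedged) ---------------------------------
# A minimal round trip through the processor above, using the class name chosen
# in this file; the defaults resize the shortest edge to 256 and center-crop to
# 224x224, so the output batch is (1, 3, 224, 224):
#
#   import numpy as np
#   processor = MobileNetV2ImageProcessor()
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)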
| 306 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
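# --- Example invocation (editorial addition; hedged) ----------------------------
# The checkpoint filename below is a placeholder; any official XLM dump holding
# "model", "params" and "dico_word2id" entries should work:
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted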
| 133 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
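# --- Example input (editorial addition; hedged) ---------------------------------
# `utils/documentation_tests.txt` is expected to hold one repo-relative path per
# line, already sorted alphabetically, e.g.:
#
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py
#
# Run from the repository root with: python utils/check_doctest_list.py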
| 10 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a (ksize x ksize) Gabor filter kernel for the given orientation and wavelength."""
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # convert image to grayscale
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
| 10 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"]
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
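# --- Example invocation (editorial addition; hedged) ----------------------------
# Paths are placeholders; the checkpoint filename must contain l1, l3 or l7 so
# the logit check above can pick the right reference values:
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path ./efficientformer_l1_300d.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1-300 --no-push_to_hub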
| 63 |
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
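# --- Quick examples (editorial addition) -----------------------------------------
#
#   >>> is_isogram("Uncopyrightable")
#   True
#   >>> is_isogram("allowance")
#   False
#   >>> is_isogram("c1")  # non-alphabetic characters are rejected
#   Traceback (most recent call last):
#       ...
#   ValueError: String must only contain alphabetic characters.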
| 275 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
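# --- Note (editorial addition; hedged) --------------------------------------------
# With the `_LazyModule` indirection above, heavy submodules are only imported on
# attribute access, so the first line below stays cheap while the second triggers
# the torch-backed modeling import:
#
#   from transformers.models.mega import MegaConfig   # config only
#   from transformers.models.mega import MegaModel    # imports modeling_mega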
| 290 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 290 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = AudioLDMPipeline
lowerCAmelCase__ = TEXT_TO_AUDIO_PARAMS
lowerCAmelCase__ = TEXT_TO_AUDIO_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__UpperCAmelCase , )
__lowerCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
__lowerCamelCase = ClapTextModelWithProjection(__UpperCAmelCase )
__lowerCamelCase = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
__lowerCamelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__UpperCAmelCase , )
__lowerCamelCase = SpeechTaHifiGan(__UpperCAmelCase )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
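    # Default call arguments: a fixed prompt plus a seeded, device-appropriate generator.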
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 256
__lowerCamelCase = audio[:10]
__lowerCamelCase = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
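    # The next two tests check that passing pre-computed (L2-normalised) prompt
    # embeddings reproduces the audio generated from the equivalent raw strings.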
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * [inputs['''prompt''']]
# forward
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
__lowerCamelCase = audioldm_pipe.tokenizer(
__UpperCAmelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase = text_inputs['''input_ids'''].to(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.text_encoder(
__UpperCAmelCase , )
__lowerCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__lowerCamelCase = F.normalize(__UpperCAmelCase , dim=-1 )
__lowerCamelCase = prompt_embeds
# forward
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = negative_prompt
__lowerCamelCase = 3 * [inputs['''prompt''']]
# forward
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
__lowerCamelCase = []
for p in [prompt, negative_prompt]:
__lowerCamelCase = audioldm_pipe.tokenizer(
__UpperCAmelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase = text_inputs['''input_ids'''].to(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.text_encoder(
__UpperCAmelCase , )
__lowerCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__lowerCamelCase = F.normalize(__UpperCAmelCase , dim=-1 )
embeds.append(__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = embeds
# forward
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
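    # Plain-string negative prompt under the PNDM scheduler: the output shape is
    # unchanged and the first few samples are compared against a reference slice.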
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
__lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = '''egg cracking'''
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 256
__lowerCamelCase = audio[:10]
__lowerCamelCase = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
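    # num_waveforms_per_prompt should multiply the leading output dimension for
    # both a single prompt and a batch of prompts.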
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
__lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
__lowerCamelCase = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__lowerCamelCase = 2
__lowerCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__lowerCamelCase = 2
__lowerCamelCase = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__lowerCamelCase = 2
__lowerCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
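    # audio_length_in_s should map directly onto waveform length:
    # len(audio) == audio_length_in_s * vocoder sampling rate.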
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.vocoder.config.sampling_rate
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.016
__lowerCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **__UpperCAmelCase )
__lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.032
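    # Doubling the vocoder's mel input channels changes the spectrogram, but
    # the generated waveform shape must stay the same.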
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = ['''hey''']
__lowerCamelCase = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 )
__lowerCamelCase = output.audios.shape
assert audio_shape == (1, 256)
__lowerCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__lowerCamelCase = SpeechTaHifiGan(__UpperCAmelCase ).to(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 )
__lowerCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=__UpperCAmelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase )
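# Slow integration tests: load the pretrained cvssp/audioldm checkpoint and
# compare slices of the generated waveform against stored reference values.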
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 8, 128, 16) )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_inputs(__UpperCAmelCase )
__lowerCamelCase = 25
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 81920
__lowerCamelCase = audio[77230:77240]
__lowerCamelCase = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
__lowerCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
__lowerCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_inputs(__UpperCAmelCase )
__lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 81920
__lowerCamelCase = audio[27780:27790]
__lowerCamelCase = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
__lowerCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 330 |
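# Tests for k-bit quantised model loading via bitsandbytes: memory footprint,
# generation quality, config round-trips, device-placement restrictions,
# multi-GPU dispatch, and adapter training on a frozen quantised base model.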
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
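# Minimal LoRA-style adapter: the wrapped module stays frozen while a small
# two-layer bottleneck, added to the module's output, remains trainable.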
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = module
__lowerCamelCase = nn.Sequential(
nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , )
__lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase )
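# Shared constants for the quantisation tests (checkpoint name, expected
# fp16/k-bit memory ratio, accepted generations); subclasses load the models.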
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ = """bigscience/bloom-1b7"""
# Constant values
lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74
lowerCAmelCase__ = """Hello my name is"""
lowerCAmelCase__ = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
# Models and tokenizer
__lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def lowerCamelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_abit.config
self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) )
__lowerCamelCase = config.to_dict()
__lowerCamelCase = config.to_diff_dict()
__lowerCamelCase = config.to_json_string()
def lowerCamelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__lowerCamelCase = self.model_fpaa.get_memory_footprint()
__lowerCamelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowerCamelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowerCamelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BitsAndBytesConfig()
__lowerCamelCase = True
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BitsAndBytesConfig()
with self.assertRaises(__UpperCAmelCase ):
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = self.model_fpaa.to(torch.floataa )
__lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.half()
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.float()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls ):
'''simple docstring'''
__lowerCamelCase = '''t5-small'''
__lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
__lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
__lowerCamelCase = '''Translate in German: Hello, my dog is cute'''
def lowerCamelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowerCamelCase = None
# test with `t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
__lowerCamelCase = modules
def lowerCamelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__lowerCamelCase = '''bigscience/bloom-560m'''
__lowerCamelCase = '''t5-small'''
# Different types of model
__lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Sequence classification model
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# CausalLM model
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Seq2seq model
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def lowerCamelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowerCamelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
__lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowerCamelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowerCamelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__UpperCAmelCase ) ):
__lowerCamelCase = LoRALayer(module.q_proj , rank=16 )
__lowerCamelCase = LoRALayer(module.k_proj , rank=16 )
__lowerCamelCase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowerCamelCase = model.forward(**__UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """gpt2-xl"""
lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
| 330 | 1 |
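# Fine-tuning script for BigBird on Natural Questions with Flax/JAX: a custom
# QA head, a three-part loss, data collation, pmapped train/eval steps,
# checkpointing helpers and an optax optimiser with linear warmup and decay.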
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : BigBirdConfig
a : jnp.dtype = jnp.floataa
a : bool = True
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
super().setup()
__lowercase = nn.Dense(5 ,dtype=self.dtype )
def __call__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = super().__call__(*_lowerCamelCase ,**_lowerCamelCase )
__lowercase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : List[Any] = FlaxBigBirdForNaturalQuestionsModule
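# Natural Questions loss: the mean of three cross-entropies, taken over the
# start-token logits, the end-token logits and the 5-way answer-category head.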
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype('''f4''')
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __lowercase :
'''simple docstring'''
a : str = "google/bigbird-roberta-base"
a : int = 3000
a : int = 1_0500
a : int = 128
a : int = 3
a : int = 1
a : int = 5
# tx_args
a : float = 3e-5
a : float = 0.0
a : int = 2_0000
a : float = 0.00_95
a : str = "bigbird-roberta-natural-questions"
a : str = "training-expt"
a : str = "data/nq-training.jsonl"
a : str = "data/nq-validation.jsonl"
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
os.makedirs(self.base_dir ,exist_ok=_lowerCamelCase )
__lowercase = os.path.join(self.base_dir ,self.save_dir )
__lowercase = self.batch_size_per_device * jax.device_count()
@dataclass
class __lowercase :
'''simple docstring'''
a : int
a : int = 4096 # no dynamic padding on TPUs
def __call__(self ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = self.collate_fn(_lowerCamelCase )
__lowercase = jax.tree_util.tree_map(_lowerCamelCase ,_lowerCamelCase )
return batch
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase = self.fetch_inputs(features['''input_ids'''] )
__lowercase = {
'''input_ids''': jnp.array(_lowerCamelCase ,dtype=jnp.intaa ),
'''attention_mask''': jnp.array(_lowerCamelCase ,dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] ,dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] ,dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] ,dtype=jnp.intaa ),
}
return batch
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = [self._fetch_inputs(_lowerCamelCase ) for ids in input_ids]
return zip(*_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = [1 for _ in range(len(_lowerCamelCase ) )]
while len(_lowerCamelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
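# Epoch iterator: optionally shuffle, then yield contiguous batches; any
# trailing partial batch is dropped.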
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
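# The train and eval steps below are pmapped across devices; losses and
# gradients are averaged over the "batch" axis with jax.lax.pmean.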
@partial(jax.pmap , axis_name='''batch''' )
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop('''start_labels''')
        end_labels = model_inputs.pop('''end_labels''')
        pooled_labels = model_inputs.pop('''pooled_labels''')
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''')
    grads = jax.lax.pmean(grads, '''batch''')
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop('''start_labels''')
    end_labels = model_inputs.pop('''end_labels''')
    pooled_labels = model_inputs.pop('''pooled_labels''')
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''')
    return metrics
class __lowercase ( train_state.TrainState ):
'''simple docstring'''
a : Callable = struct.field(pytree_node=lowerCAmelCase__ )
@dataclass
class __lowercase :
'''simple docstring'''
a : Args
a : Callable
a : Callable
a : Callable
a : Callable
a : wandb
a : Callable = None
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ) -> str:
'''simple docstring'''
__lowercase = model.params
__lowercase = TrainState.create(
apply_fn=model.__call__ ,params=_lowerCamelCase ,tx=_lowerCamelCase ,loss_fn=_lowerCamelCase ,)
if ckpt_dir is not None:
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = restore_checkpoint(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
__lowercase , __lowercase = build_tx(**_lowerCamelCase )
__lowercase = train_state.TrainState(
step=_lowerCamelCase ,apply_fn=model.__call__ ,params=_lowerCamelCase ,tx=_lowerCamelCase ,opt_state=_lowerCamelCase ,)
__lowercase = args
__lowercase = data_collator
__lowercase = lr
__lowercase = params
__lowercase = jax_utils.replicate(_lowerCamelCase )
return state
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.args
__lowercase = len(_lowerCamelCase ) // args.batch_size
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCamelCase ,jax.device_count() )
for epoch in range(args.max_epochs ):
__lowercase = jnp.array(0 ,dtype=jnp.floataa )
__lowercase = get_batched_dataset(_lowerCamelCase ,args.batch_size ,seed=_lowerCamelCase )
__lowercase = 0
for batch in tqdm(_lowerCamelCase ,total=_lowerCamelCase ,desc=f"Running EPOCH-{epoch}" ):
__lowercase = self.data_collator(_lowerCamelCase )
__lowercase , __lowercase , __lowercase = self.train_step_fn(_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
__lowercase = jax_utils.unreplicate(state.step )
__lowercase = running_loss.item() / i
__lowercase = self.scheduler_fn(state_step - 1 )
__lowercase = self.evaluate(_lowerCamelCase ,_lowerCamelCase )
__lowercase = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(_lowerCamelCase ) )
self.logger.log(_lowerCamelCase ,commit=_lowerCamelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}" ,state=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = get_batched_dataset(_lowerCamelCase ,self.args.batch_size )
__lowercase = len(_lowerCamelCase ) // self.args.batch_size
__lowercase = jnp.array(0 ,dtype=jnp.floataa )
__lowercase = 0
for batch in tqdm(_lowerCamelCase ,total=_lowerCamelCase ,desc='''Evaluating ... ''' ):
__lowercase = self.data_collator(_lowerCamelCase )
__lowercase = self.val_step_fn(_lowerCamelCase ,**_lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = jax_utils.unreplicate(_lowerCamelCase )
print(f"SAVING CHECKPOINT IN {save_dir}" ,end=''' ... ''' )
self.model_save_fn(_lowerCamelCase ,params=state.params )
with open(os.path.join(_lowerCamelCase ,'''opt_state.msgpack''' ) ,'''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args ,os.path.join(_lowerCamelCase ,'''args.joblib''' ) )
joblib.dump(self.data_collator ,os.path.join(_lowerCamelCase ,'''data_collator.joblib''' ) )
with open(os.path.join(_lowerCamelCase ,'''training_state.json''' ) ,'''w''' ) as f:
json.dump({'''step''': state.step.item()} ,_lowerCamelCase )
print('''DONE''' )
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=''' ... ''')
    with open(os.path.join(save_dir, '''flax_model.msgpack'''), '''rb''') as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, '''opt_state.msgpack'''), '''rb''') as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, '''args.joblib'''))
    data_collator = joblib.load(os.path.join(save_dir, '''data_collator.joblib'''))
    with open(os.path.join(save_dir, '''training_state.json'''), '''r''') as f:
        training_state = json.load(f)
    step = training_state['''step''']
    print('''DONE''')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1E-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != '''bias''' and k[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 352 |
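# Conversion script: ports fairseq BART checkpoints (bart.large, .mnli, .cnn,
# xsum) to the Hugging Face format and verifies the two models agree.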
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    hub_interface = torch.hub.load('''pytorch/fairseq''', '''bart.large.cnn''').eval()
    hub_interface.model.load_state_dict(sd['''model'''])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load('''pytorch/fairseq''', checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('''.''', '''-''')
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors='''pt''').unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['''model.shared.weight'''] = state_dict['''model.decoder.embed_tokens.weight''']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict('''mnli''', tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, '''lm_head'''):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 217 | 0 |
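# Unit tests for generation stopping criteria: maximum length, maximum new
# tokens, wall-clock time limits, and validation of criteria lists.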
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 14 |
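# Lazy import structure for the CTRL model: the heavy PyTorch and TensorFlow
# modules are only imported when their symbols are first accessed.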
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 | 0 |
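# BLIP processor: bundles a BlipImageProcessor with a BERT tokenizer so text
# and images can be prepared for the model through a single __call__.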
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( _lowerCamelCase):
A_ : Dict = ['image_processor', 'tokenizer']
A_ : Union[str, Any] = 'BlipImageProcessor'
A_ : Optional[Any] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = False
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.image_processor
def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__lowerCAmelCase : Dict = self.tokenizer
__lowerCAmelCase : Dict = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
return text_encoding
# add pixel_values
__lowerCAmelCase : str = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : str = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : List[Any] = None
if text_encoding is not None:
encoding_image_processor.update(_SCREAMING_SNAKE_CASE )
return encoding_image_processor
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.tokenizer.model_input_names
__lowerCAmelCase : Optional[int] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 182 |
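# Project Euler 38: largest 1-9 pandigital concatenated product. For a 4-digit
# n, concat(n, 2n) == 100002 * n; for a 3-digit n, concat(n, 2n, 3n) == 1002003 * n.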
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set('123456789')
def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    print(f'{solution() = }')
| 182 | 1 |
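# Configuration class for Swin Transformer V2, mapping num_attention_heads /
# num_hidden_layers onto the Swin-specific num_heads / num_layers attributes.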
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'swinv2'
lowerCamelCase__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=[2, 2, 6, 2], __a=[3, 6, 12, 24], __a=7, __a=4.0, __a=True, __a=0.0, __a=0.0, __a=0.1, __a="gelu", __a=False, __a=0.02, __a=1E-5, __a=32, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Union[str, Any] = embed_dim
_lowerCAmelCase : Union[str, Any] = depths
_lowerCAmelCase : Tuple = len(__a)
_lowerCAmelCase : Union[str, Any] = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : str = mlp_ratio
_lowerCAmelCase : Optional[Any] = qkv_bias
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = drop_path_rate
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Union[str, Any] = use_absolute_embeddings
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(__a) - 1))
_lowerCAmelCase : Optional[int] = (0, 0, 0, 0)
| 36 |
"""simple docstring"""
import re
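# Pattern: optional "+91" (followed by - or space), optional leading 0 or
# "91", then a ten-digit number starting with 7, 8 or 9.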
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 153 | 0 |
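# Automatic mask generation (SAM-style) pipeline: the image is split into
# crops, grids of point prompts are batched through the model, and the
# predicted masks are filtered and deduplicated with non-maximum suppression.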
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        # Yield the grid points in chunks so the SAM decoder never sees more
        # than `points_per_batch` prompts at a time.
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 138 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, and there is
    # no deterministic backward operator for pad, so determinism is disabled.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
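# ---------------------------------------------------------------------------
# Attend-and-Excite in a nutshell, mirroring the integration test above. The
# token indices point at the prompt tokens whose cross-attention is boosted
# ([5, 7] are "elephant" and "glasses" after the BOS token). Commented out:
#
#   pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe(
#       prompt="a painting of an elephant with glasses",
#       token_indices=[5, 7],
#       guidance_scale=7.5,
#       num_inference_steps=50,
#   ).images[0]
# ---------------------------------------------------------------------------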
| 138 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
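# ---------------------------------------------------------------------------
# Quick usage sketch for the processor exercised above. The checkpoint name
# is an assumption; the output shape follows the crop_size configuration:
#
#   from transformers import LevitImageProcessor
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   # -> shape (batch, 3, crop_size["height"], crop_size["width"])
# ---------------------------------------------------------------------------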
| 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class BlipaProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = BlipaProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
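# ---------------------------------------------------------------------------
# Typical real-world use of the processor tested above. The checkpoint name
# is an assumption (any BLIP-2 checkpoint pairing this image processor with
# its tokenizer should work):
#
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="Question: what is shown? Answer:", return_tensors="pt")
# ---------------------------------------------------------------------------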
| 166 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents):
    """Split documents into passages of roughly 100 words."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents, ctx_encoder, ctx_tokenizer):
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
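# ---------------------------------------------------------------------------
# Sketch: reloading the saved passages + index and querying them. The dataset
# and index paths follow main() above; the DPR *question* encoder checkpoint
# is an assumption, not something this script pins down.
#
#   from datasets import load_from_disk
#   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
#   dataset = load_from_disk(passages_path)
#   dataset.load_faiss_index("embeddings", index_path)
#   q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   question_emb = q_enc(**q_tok("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
#   scores, docs = dataset.get_nearest_examples("embeddings", question_emb, k=5)
# ---------------------------------------------------------------------------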
| 356 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
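# ---------------------------------------------------------------------------
# With the lazy module in place, importing FocalNet symbols only triggers the
# heavy torch-backed import on first attribute access; a usage sketch:
#
#   from transformers import FocalNetConfig, FocalNetModel  # resolved lazily
#   model = FocalNetModel(FocalNetConfig())
# ---------------------------------------------------------------------------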
| 319 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__magic_name__ = "src/transformers"
__magic_name__ = "docs/source/en/tasks"
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start prompt.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as Markdown doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """For a given task guide, check the model list in the generated tip is up to date and maybe `overwrite`."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 100 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( UpperCamelCase_ = "AAPL" ):
__SCREAMING_SNAKE_CASE = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(UpperCamelCase_ ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 100 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
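# ---------------------------------------------------------------------------
# End-to-end sketch mirroring the integration test above: embed a sentence
# with the same checkpoint and mean-pool the last hidden state. The tokenizer
# class is an assumption; commented out so the test module stays side-effect
# free on import:
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
#   model = MPNetModel.from_pretrained("microsoft/mpnet-base")
#   enc = tokenizer("MPNet combines masked and permuted pre-training", return_tensors="pt")
#   sentence_embedding = model(**enc).last_hidden_state.mean(dim=1)  # shape (1, 768)
# ---------------------------------------------------------------------------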
| 29 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 29 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        inverse_scheduler = DDIMInverseScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Optional[int] ) -> int:
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowercase__ : Tuple = self.get_dummy_components()
lowercase__ : Optional[int] = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowercase_ , lowercase_ , lowercase_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase__ : List[str] = self.get_dummy_inputs(lowercase_ )
lowercase__ : Optional[int] = pipe(**lowercase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase_ )
lowercase__ : Tuple = self.pipeline_class.from_pretrained(lowercase_ )
pipe_loaded.to(lowercase_ )
pipe_loaded.set_progress_bar_config(disable=lowercase_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowercase_ , lowercase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ : str = self.get_dummy_inputs(lowercase_ )
lowercase__ : Tuple = pipe_loaded(**lowercase_ )[0]
lowercase__ : List[Any] = np.abs(output - output_loaded ).max()
self.assertLess(lowercase_ , 1E-4 )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
lowercase__ : List[Any] = "cpu"
lowercase__ : Any = self.get_dummy_components()
lowercase__ : Union[str, Any] = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : List[Any] = self.get_dummy_mask_inputs(lowercase_ )
lowercase__ : int = pipe.generate_mask(**lowercase_ )
lowercase__ : int = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase__ : List[str] = np.array([0] * 9 )
lowercase__ : str = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : Union[str, Any] = "cpu"
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : List[Any] = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : List[str] = self.get_dummy_inversion_inputs(lowercase_ )
lowercase__ : Optional[int] = pipe.invert(**lowercase_ ).images
lowercase__ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowercase__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase_ , 1E-3 )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __UpperCamelCase ( self : Dict ) -> Dict:
lowercase__ : Tuple = "cpu"
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : Dict = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
lowercase__ : Optional[int] = DPMSolverMultistepScheduler(**lowercase_ )
lowercase__ : Optional[int] = DPMSolverMultistepInverseScheduler(**lowercase_ )
lowercase__ : Tuple = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Optional[Any] = self.get_dummy_inversion_inputs(lowercase_ )
lowercase__ : Tuple = pipe.invert(**lowercase_ ).images
lowercase__ : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowercase__ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase_ , 1E-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator)
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy").images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator)
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator,
            num_inference_steps=25).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy").images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 87 |
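For readability, here is a hedged, self-contained sketch of the three-stage DiffEdit flow the slow tests above exercise: mask generation, DDIM inversion, then masked denoising. The checkpoint, prompts, and inpaint_strength mirror the tests; treat it as an illustration, not a canonical recipe.

# Hedged sketch of the DiffEdit flow from the tests above (illustrative only).
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))
generator = torch.manual_seed(0)

# 1) decide where to edit, 2) invert the source image into latents, 3) denoise under the mask
mask_image = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears", generator=generator
)
image_latents = pipe.invert(
    prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7, generator=generator
).latents
edited = pipe(
    prompt="a bowl of pears", mask_image=mask_image, image_latents=image_latents,
    inpaint_strength=0.7, generator=generator
).images[0]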
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Return the boolean value of the environment variable `key`, or `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    # whether to empty the shared tmpdir before each individual test
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create a shared temporary directory for the whole test class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the temporary directory once all tests have run."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Optionally empty tmpdir before each test."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Gather `tensor` from every process and check that all copies are equal."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 339 | 0 |
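A minimal sketch of how the helpers above are typically consumed, assuming they are exported under the names restored here (`slow`, `require_cuda`, `execute_subprocess_async`, as in accelerate's test utilities); the test body and command are illustrative only.

# Illustrative usage of the env-flag gating and subprocess runner above.
import sys
import unittest

class ExampleTests(unittest.TestCase):
    @slow          # skipped unless RUN_SLOW=yes is exported (parse_flag_from_env)
    @require_cuda  # skipped when no GPU is visible
    def test_multiprocess_launch(self):
        # raises RuntimeError with the combined stderr if the child exits non-zero
        result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
        self.assertEqual(result.returncode, 0)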
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _UpperCAmelCase ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self : Dict , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : UNet2DConditionModel , __UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCAmelCase : StableDiffusionSafetyChecker , __UpperCAmelCase : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCAmelCase )
@torch.no_grad()
def __call__( self : str , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 50 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[torch.FloatTensor] = None , **__UpperCAmelCase : List[str] , ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = len(__UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__UpperCAmelCase )}.''' )
# get prompt text embeddings
_A = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_A = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_A , _A , _A = text_embeddings.shape
_A = text_embeddings.repeat(1 , __UpperCAmelCase , 1 )
_A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_A = 42
if negative_prompt is None:
_A = [""]
elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !='''
f''' {type(__UpperCAmelCase )}.''' )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = [negative_prompt]
elif batch_size != len(__UpperCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
_A = negative_prompt
_A = text_input_ids.shape[-1]
_A = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
_A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A = uncond_embeddings.shape[1]
_A = uncond_embeddings.repeat(__UpperCAmelCase , __UpperCAmelCase , 1 )
_A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_A = torch.randn(
__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase ).to(self.device )
_A = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase ).to(
self.device )
else:
_A = torch.randn(
__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
_A = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_A = latents_reference.to(self.device )
_A = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_A = (latents_shape[3] - latents_shape_reference[3]) // 2
_A = (latents_shape[2] - latents_shape_reference[2]) // 2
_A = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_A = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_A = 0 if dx < 0 else dx
_A = 0 if dy < 0 else dy
_A = max(-dx , 0 )
_A = max(-dy , 0 )
# import pdb
# pdb.set_trace()
_A = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_A = {}
if accepts_eta:
_A = eta
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
# predict the noise residual
_A = self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_A , _A = noise_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_A = 1 / 0.18215 * latents
_A = self.vae.decode(__UpperCAmelCase ).sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_A = self.feature_extractor(self.numpy_to_pil(__UpperCAmelCase ) , return_tensors="pt" ).to(
self.device )
_A , _A = self.safety_checker(
images=__UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_A = None
if output_type == "pil":
_A = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase )
| 174 |
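The denoising loop above batches one unconditional and one conditional forward pass and recombines them with the guidance weight w of Imagen's equation (2). Isolated as a small sketch (the helper name is ours, not the pipeline's):

# Classifier-free guidance recombination, as performed inside the loop above.
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # the first half of the batch is unconditional, the second half conditional
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)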
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Dict:
    """Return a mapping from utf-8 byte values to printable unicode characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer ):
    """Byte-level BPE tokenizer for LED, based on the BART tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple="replace" , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : Any="<s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : Optional[int]="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Any=False , **__UpperCAmelCase : Any , ):
'''simple docstring'''
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
_A = json.load(__UpperCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
_A = errors # how to handle errors in decoding
_A = bytes_to_unicode()
_A = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
_A = merges_handle.read().split("\n" )[1:-1]
_A = [tuple(merge.split() ) for merge in bpe_merges]
_A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_A = {}
_A = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_A = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_A = tuple(__UpperCAmelCase )
_A = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
_A = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__UpperCAmelCase ):
try:
_A = word.index(__UpperCAmelCase , __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__UpperCAmelCase )
_A = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
_A = get_pairs(__UpperCAmelCase )
_A = " ".join(__UpperCAmelCase )
_A = word
return word
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = []
for token in re.findall(self.pat , __UpperCAmelCase ):
_A = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple ):
'''simple docstring'''
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase )
def lowerCAmelCase ( self : str , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = "".join(__UpperCAmelCase )
_A = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
_A = 0
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
_A = token_index
writer.write(" ".join(__UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict=False , **__UpperCAmelCase : Any ):
'''simple docstring'''
_A = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()):
_A = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , ):
'''simple docstring'''
_A = super()._pad(
encoded_inputs=__UpperCAmelCase , max_length=__UpperCAmelCase , padding_strategy=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
_A = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_A = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_A = len(encoded_inputs["global_attention_mask"] ) != len(__UpperCAmelCase )
if needs_to_be_padded:
_A = len(__UpperCAmelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_A = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_A = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 174 | 1 |
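The LED-specific detail in `_pad` above is that `global_attention_mask` is padded with -1 rather than 0, because 0 already means "local attention". A standalone sketch of that rule (the helper name is ours):

# Sketch of the -1 padding rule for LED's global_attention_mask.
def pad_global_attention_mask(mask, target_length, padding_side="right"):
    difference = target_length - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]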
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place with binary insertion sort and return it."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion point of val in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 306 |
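A quick sanity check for the function above (binary search finds the insertion point, then elements are shifted right):

assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([1, 1, 1]) == [1, 1, 1]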
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        # relax edges out of the forward frontier node
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        # relax edges out of the backward frontier node (roles of fwd/bwd swap)
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 0 |
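Using the adjacency lists defined above: from E the candidate paths to F are E-B-C-D-F (cost 4) and E-G-F (cost 3), so the bidirectional search returns 3; a source equal to the destination short-circuits to 0.

assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3
assert bidirectional_dij("E", "E", graph_fwd, graph_bwd) == 0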
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ['input_values', 'padding_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2_4000 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = chunk_length_s
UpperCamelCase : List[Any] = overlap
@property
def a_ ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a_ ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
UpperCamelCase : Optional[int] = True
UpperCamelCase : Union[str, Any] = bool(
isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
            UpperCamelCase : List[Any] = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
            UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.float32 )
        elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            UpperCamelCase : Any = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
UpperCamelCase : Any = [np.asarray(SCREAMING_SNAKE_CASE_ ).T]
# verify inputs are valid
for idx, example in enumerate(SCREAMING_SNAKE_CASE_ ):
if example.ndim > 2:
raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' )
UpperCamelCase : List[Any] = None
UpperCamelCase : Dict = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCamelCase : Tuple = min(array.shape[0] for array in raw_audio )
UpperCamelCase : List[str] = int(np.floor(max_length / self.chunk_stride ) )
UpperCamelCase : str = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCamelCase : int = max(array.shape[0] for array in raw_audio )
UpperCamelCase : str = int(np.ceil(max_length / self.chunk_stride ) )
UpperCamelCase : Optional[int] = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCamelCase : int = """max_length"""
else:
UpperCamelCase : List[str] = input_values
# normal padding on batch
if padded_inputs is None:
UpperCamelCase : List[Any] = self.pad(
SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , )
if padding:
UpperCamelCase : int = padded_inputs.pop("""attention_mask""" )
UpperCamelCase : Optional[Any] = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
UpperCamelCase : Union[str, Any] = example[..., None]
input_values.append(example.T )
UpperCamelCase : Dict = input_values
if return_tensors is not None:
UpperCamelCase : Any = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
return padded_inputs
| 354 |
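A hedged usage sketch for the extractor above; the 24 kHz mono configuration mirrors the class defaults, while the waveform and the printed shape are illustrative assumptions.

# Illustrative only: silent audio, assumed defaults of the class defined above.
import numpy as np

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
waveform = np.zeros(30_000, dtype=np.float32)  # 1.25 s of silent mono audio
inputs = feature_extractor(waveform, sampling_rate=24_000, return_tensors="np")
print(inputs["input_values"].shape)  # expected (1, 1, 30000): batch, channels, samples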
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 27 | 0 |
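The negative-number swap is the crux of the function above: a large negative running minimum becomes the new maximum when multiplied by another negative value.

assert max_product_subarray([2, 3, -2, 4]) == 6   # subarray [2, 3]
assert max_product_subarray([-2, -3, 4]) == 24    # subarray [-2, -3, 4]
assert max_product_subarray([-2, 0, -1]) == 0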
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> str:
UpperCAmelCase_ : int = tmp_path / '''file.csv'''
UpperCAmelCase_ : Tuple = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Any = tmp_path / '''malformed_file.csv'''
UpperCAmelCase_ : List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Any ) -> List[str]:
UpperCAmelCase_ : Dict = tmp_path / '''csv_with_image.csv'''
UpperCAmelCase_ : int = textwrap.dedent(
F"""\\n image\n {image_file}\n """ )
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
UpperCAmelCase_ : Union[str, Any] = tmp_path / '''csv_with_label.csv'''
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
UpperCAmelCase_ : int = tmp_path / '''csv_with_int_list.csv'''
UpperCAmelCase_ : Dict = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
UpperCAmelCase_ : Union[str, Any] = Csv()
UpperCAmelCase_ : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(SCREAMING_SNAKE_CASE__, match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(SCREAMING_SNAKE_CASE__ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
with open(SCREAMING_SNAKE_CASE__, encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Tuple = f.read().splitlines()[1]
UpperCAmelCase_ : Any = Csv(encoding='''utf-8''', features=Features({'''image''': Image()} ) )
UpperCAmelCase_ : List[str] = csv._generate_tables([[csv_file_with_image]] )
UpperCAmelCase_ : Dict = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
UpperCAmelCase_ : Tuple = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
with open(SCREAMING_SNAKE_CASE__, encoding='''utf-8''' ) as f:
UpperCAmelCase_ : List[Any] = f.read().splitlines()[1:]
UpperCAmelCase_ : List[Any] = Csv(encoding='''utf-8''', features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
UpperCAmelCase_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
UpperCAmelCase_ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
UpperCAmelCase_ : str = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
    UpperCAmelCase_ : List[str] = Csv(encoding='''utf-8''', sep=''',''', converters={'''int_list''': lambda x: [int(i ) for i in x.split()]} )
UpperCAmelCase_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCAmelCase_ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
UpperCAmelCase_ : Tuple = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 125 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : int = tmp_path / '''file.csv'''
lowerCamelCase__ : Tuple = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Any = tmp_path / '''malformed_file.csv'''
lowerCamelCase__ : List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Dict = tmp_path / '''csv_with_image.csv'''
lowerCamelCase__ : int = textwrap.dedent(
f"\\n image\n {image_file}\n " )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = tmp_path / '''csv_with_label.csv'''
lowerCamelCase__ : List[Any] = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : int = tmp_path / '''csv_with_int_list.csv'''
lowerCamelCase__ : Dict = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = Csv()
lowerCamelCase__ : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(UpperCAmelCase , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(UpperCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
lowerCamelCase__ : Tuple = f.read().splitlines()[1]
lowerCamelCase__ : Any = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
lowerCamelCase__ : List[str] = csv._generate_tables([[csv_file_with_image]] )
lowerCamelCase__ : Dict = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
lowerCamelCase__ : Tuple = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _a ( UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
lowerCamelCase__ : List[Any] = f.read().splitlines()[1:]
lowerCamelCase__ : List[Any] = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
lowerCamelCase__ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
lowerCamelCase__ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
lowerCamelCase__ : str = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
    lowerCamelCase__ : List[str] = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
lowerCamelCase__ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
lowerCamelCase__ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
lowerCamelCase__ : Tuple = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 142 | 0 |
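The converters hook exercised by the last test above maps a raw CSV cell to a Python value before the pyarrow table is built; a minimal sketch of the same idea (the builder kwarg mirrors the tests, the comment describes the expected result):

# Illustrative sketch of the converters behaviour tested above.
from datasets.packaged_modules.csv.csv import Csv

builder = Csv(converters={"int_list": lambda x: [int(i) for i in x.split()]})
# each generated pyarrow table then stores int_list as list<int64> values,
# e.g. the cell "1 2 3" becomes [1, 2, 3].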
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge sort in O(n log n); returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (i in P, j in Q) with P[i] > Q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 361 |
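Cross-checking the two implementations above on a fresh input: [3, 1, 2] has exactly the inversions (3, 1) and (3, 2).

assert count_inversions_bf([3, 1, 2]) == 2
sorted_arr, n = count_inversions_recursive([3, 1, 2])
assert (sorted_arr, n) == ([1, 2, 3], 2)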
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    batch_params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return self.time_input_dim
@property
def snake_case_ (self ):
return self.time_input_dim * 4
@property
def snake_case_ (self ):
return 1_0_0
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : str = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        _UpperCAmelCase : Union[str, Any] = UNet2DConditionModel(**lowerCAmelCase__ )
return model
@property
def snake_case_ (self ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.dummy_unet
_UpperCAmelCase : str = self.dummy_movq
_UpperCAmelCase : Any = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCAmelCase__ , )
_UpperCAmelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=0 ):
_UpperCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create hint
_UpperCAmelCase : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith("""mps""" ):
_UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCAmelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
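    # A minimal sketch of the generator dispatch used above: ``torch.Generator``
    # historically does not support the Apple "mps" backend, so seeding falls back
    # to the global ``torch.manual_seed`` generator there:
    #     generator = (
    #         torch.manual_seed(seed)
    #         if str(device).startswith("mps")
    #         else torch.Generator(device=device).manual_seed(seed)
    #     )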
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = """cpu"""
_UpperCAmelCase : List[str] = self.get_dummy_components()
_UpperCAmelCase : str = self.pipeline_class(**lowerCAmelCase__ )
_UpperCAmelCase : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Any = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
_UpperCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase : Union[str, Any] = np.array(
[0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ):
_UpperCAmelCase : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
_UpperCAmelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
_UpperCAmelCase : Union[str, Any] = torch.from_numpy(np.array(lowerCAmelCase__ ) ).float() / 2_5_5.0
_UpperCAmelCase : Union[str, Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
_UpperCAmelCase : str = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
_UpperCAmelCase : int = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : str = """A robot, 4k photo"""
_UpperCAmelCase : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_UpperCAmelCase : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
_UpperCAmelCase : Tuple = pipeline(
image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , hint=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , output_type="""np""" , )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 170 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[str] = '''owlvit_text_model'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=4_9_4_0_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : List[str]=8 , SCREAMING_SNAKE_CASE__ : str=1_6 , SCREAMING_SNAKE_CASE__ : Any="quick_gelu" , SCREAMING_SNAKE_CASE__ : Any=1E-5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : int=1.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_9_4_0_6 , SCREAMING_SNAKE_CASE__ : Dict=4_9_4_0_7 , **SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = vocab_size
a_ : List[Any] = hidden_size
a_ : Dict = intermediate_size
a_ : Dict = num_hidden_layers
a_ : List[str] = num_attention_heads
a_ : Any = max_position_embeddings
a_ : str = hidden_act
a_ : Optional[int] = layer_norm_eps
a_ : Tuple = attention_dropout
a_ : Any = initializer_range
a_ : Optional[Any] = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
a_ , a_ : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
a_ : Union[str, Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
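# A minimal usage sketch (the upstream class name is assumed to be OwlViTTextConfig):
# thanks to the dispatch above, the text sub-config can be loaded directly from a
# full OwlViT checkpoint, e.g.
#     text_config = OwlViTTextConfig.from_pretrained("google/owlvit-base-patch32")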
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[Any] = '''owlvit_vision_model'''
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE__ : Dict=3_0_7_2 , SCREAMING_SNAKE_CASE__ : int=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : str=7_6_8 , SCREAMING_SNAKE_CASE__ : List[str]=3_2 , SCREAMING_SNAKE_CASE__ : Any="quick_gelu" , SCREAMING_SNAKE_CASE__ : Tuple=1E-5 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=1.0 , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = hidden_size
a_ : Optional[Any] = intermediate_size
a_ : Optional[Any] = num_hidden_layers
a_ : Optional[Any] = num_attention_heads
a_ : List[str] = num_channels
a_ : Tuple = image_size
a_ : str = patch_size
a_ : str = hidden_act
a_ : Dict = layer_norm_eps
a_ : List[str] = attention_dropout
a_ : List[str] = initializer_range
a_ : List[str] = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
a_ , a_ : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
a_ : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[Any] = '''owlvit'''
snake_case__ : Dict = True
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Dict=5_1_2 , SCREAMING_SNAKE_CASE__ : Dict=2.6592 , SCREAMING_SNAKE_CASE__ : List[str]=True , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE__ )
if text_config is None:
a_ : Optional[int] = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
a_ : int = {}
            logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
a_ : Union[str, Any] = OwlViTTextConfig(**SCREAMING_SNAKE_CASE__ )
a_ : Any = OwlViTVisionConfig(**SCREAMING_SNAKE_CASE__ )
a_ : str = projection_dim
a_ : Any = logit_scale_init_value
a_ : List[Any] = return_dict
a_ : Optional[int] = 1.0
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
a_ , a_ : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
a_ : List[Any] = {}
a_ : List[str] = text_config
a_ : Any = vision_config
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : Optional[int] = copy.deepcopy(self.__dict__ )
a_ : Tuple = self.text_config.to_dict()
a_ : Optional[Any] = self.vision_config.to_dict()
a_ : Optional[Any] = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
return 1E-4
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : "ProcessorMixin" , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : int = super().generate_dummy_inputs(
processor.tokenizer , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
return {**text_input_dict, **image_input_dict}
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return 1_4
| 32 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = """xlnet"""
__lowerCAmelCase = ["""mems"""]
__lowerCAmelCase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , lowerCamelCase_ : Any=3_2000 , lowerCamelCase_ : Dict=1024 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : List[Any]=4096 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Union[str, Any]="bi" , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=1E-12 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Union[str, Any]=512 , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Union[str, Any]="last" , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : str="tanh" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Dict=5 , lowerCamelCase_ : str=5 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : int=2 , **lowerCamelCase_ : List[Any] , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = n_layer
UpperCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
UpperCamelCase = d_model // n_head
UpperCamelCase = ff_activation
UpperCamelCase = d_inner
UpperCamelCase = untie_r
UpperCamelCase = attn_type
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = dropout
UpperCamelCase = mem_len
UpperCamelCase = reuse_len
UpperCamelCase = bi_data
UpperCamelCase = clamp_len
UpperCamelCase = same_length
UpperCamelCase = summary_type
UpperCamelCase = summary_use_proj
UpperCamelCase = summary_activation
UpperCamelCase = summary_last_dropout
UpperCamelCase = start_n_top
UpperCamelCase = end_n_top
UpperCamelCase = bos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , lowerCamelCase_ , )
UpperCamelCase = kwargs["""use_cache"""]
UpperCamelCase = use_mems_eval
UpperCamelCase = use_mems_train
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
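    # Sketch of the deprecation shim above (the upstream class name is assumed to be
    # XLNetConfig): a legacy ``use_cache`` kwarg is rewritten into ``use_mems_eval``,
    # so ``XLNetConfig(use_cache=True)`` behaves like ``XLNetConfig(use_mems_eval=True)``.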
    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self , value):
        """simple docstring"""
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 343 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A ='▁'
__A =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Optional[int] = BigBirdTokenizer
lowerCAmelCase :Any = BigBirdTokenizerFast
lowerCAmelCase :List[str] = True
lowerCAmelCase :Tuple = True
def snake_case__ ( self):
super().setUp()
UpperCAmelCase__ : Tuple = self.tokenizer_class(_lowerCamelCase , keep_accents=_lowerCamelCase)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """[MASK]""")
self.assertEqual(len(_lowerCamelCase) , 1004)
def snake_case__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def snake_case__ ( self):
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase__ : Any = """I was born in 92000, and this is falsé."""
UpperCAmelCase__ : Tuple = tokenizer.tokenize(_lowerCamelCase)
UpperCAmelCase__ : Dict = rust_tokenizer.tokenize(_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
UpperCAmelCase__ : int = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase__ : str = tokenizer.encode(_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Dict = BigBirdTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase)
UpperCAmelCase__ : Any = tokenizer.tokenize("""This is a test""")
self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase)
self.assertListEqual(
_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_lowerCamelCase)
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def snake_case__ ( self):
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
@slow
def snake_case__ ( self):
UpperCAmelCase__ : int = """Hello World!"""
UpperCAmelCase__ : int = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase))
@slow
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
UpperCAmelCase__ : Union[str, Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase))
@require_torch
@slow
def snake_case__ ( self):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase__ : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase__ : str = """ """.join(_lowerCamelCase)
UpperCAmelCase__ : int = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors="""pt""" , return_token_type_ids=_lowerCamelCase)
UpperCAmelCase__ : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_lowerCamelCase)
UpperCAmelCase__ : int = BigBirdConfig(attention_type="""original_full""")
UpperCAmelCase__ : List[str] = BigBirdModel(_lowerCamelCase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCamelCase)
model(**_lowerCamelCase)
@slow
def snake_case__ ( self):
UpperCAmelCase__ : int = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
UpperCAmelCase__ : Any = tokenizer.decode(tokenizer("""Paris is the [MASK].""").input_ids)
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""")
@slow
def snake_case__ ( self):
# fmt: off
UpperCAmelCase__ : int = {"""input_ids""": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 362 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A ='\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__A ='\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
__A ='\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def _UpperCamelCase ( UpperCamelCase__ ):
def remove_articles(UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(UpperCamelCase__ , """ """ , UpperCamelCase__ )
def white_space_fix(UpperCamelCase__ ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase__ ):
UpperCAmelCase__ : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
return int(normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ ) )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Any = [any(compute_exact(UpperCamelCase__ , UpperCamelCase__ ) for ref in refs ) for pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ )]
return (sum(UpperCamelCase__ ) / len(UpperCamelCase__ )) * 1_0_0
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
UpperCAmelCase__ : List[Any] = Counter(UpperCamelCase__ )
UpperCAmelCase__ : str = Counter(UpperCamelCase__ )
UpperCAmelCase__ : Dict = Counter()
for sgram, scount in sgramcounter.items():
UpperCAmelCase__ : Dict = scount * numref
UpperCAmelCase__ : int = Counter(UpperCamelCase__ )
UpperCAmelCase__ : Optional[int] = Counter()
for cgram, ccount in cgramcounter.items():
UpperCAmelCase__ : Union[str, Any] = ccount * numref
# KEEP
UpperCAmelCase__ : str = sgramcounter_rep & cgramcounter_rep
UpperCAmelCase__ : List[Any] = keepgramcounter_rep & rgramcounter
UpperCAmelCase__ : Dict = sgramcounter_rep & rgramcounter
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Union[str, Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase__ : List[str] = 1
UpperCAmelCase__ : Optional[Any] = 1
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : Optional[int] = keeptmpscorea / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
UpperCAmelCase__ : Any = keeptmpscorea / sum(keepgramcounterall_rep.values() )
UpperCAmelCase__ : Any = 0
if keepscore_precision > 0 or keepscore_recall > 0:
UpperCAmelCase__ : str = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
UpperCAmelCase__ : str = sgramcounter_rep - cgramcounter_rep
UpperCAmelCase__ : Optional[Any] = delgramcounter_rep - rgramcounter
UpperCAmelCase__ : List[str] = sgramcounter_rep - rgramcounter
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : List[Any] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase__ : Union[str, Any] = 1
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : Optional[Any] = deltmpscorea / len(UpperCamelCase__ )
# ADDITION
UpperCAmelCase__ : Tuple = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
UpperCAmelCase__ : Optional[Any] = set(UpperCamelCase__ ) & set(UpperCamelCase__ )
UpperCAmelCase__ : List[str] = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
UpperCAmelCase__ : str = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : List[Any] = 1
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : Optional[int] = addtmpscore / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : int = addtmpscore / len(UpperCamelCase__ )
UpperCAmelCase__ : Tuple = 0
if addscore_precision > 0 or addscore_recall > 0:
UpperCAmelCase__ : int = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
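# Worked micro-example for the 0/0 = 1 convention used above: when a prediction
# matches the source and all references exactly, the "addition" n-gram sets are
# empty, so both addition precision and recall would be 0/0. Treating 0/0 as 1
# yields ADD-F1 = 2 * 1 * 1 / (1 + 1) = 1 for the exact match, instead of 0.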
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = len(UpperCamelCase__ )
UpperCAmelCase__ : Tuple = ssent.split(""" """ )
UpperCAmelCase__ : Optional[int] = csent.split(""" """ )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : List[Any] = []
for rsent in rsents:
UpperCAmelCase__ : List[str] = rsent.split(""" """ )
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Dict = []
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
UpperCAmelCase__ : Optional[int] = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
UpperCAmelCase__ : Union[str, Any] = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
UpperCAmelCase__ : Any = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
UpperCAmelCase__ : Optional[int] = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
UpperCAmelCase__ : Dict = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
UpperCAmelCase__ : str = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
UpperCAmelCase__ : Dict = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
UpperCAmelCase__ : int = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
UpperCAmelCase__ : List[Any] = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[Any] = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : str = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Any = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
UpperCAmelCase__ : Union[str, Any] = sum([delascore, delascore, delascore, delascore] ) / 4
UpperCAmelCase__ : Dict = sum([addascore, addascore, addascore, addascore] ) / 4
UpperCAmelCase__ : List[Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ = True , UpperCamelCase__ = "13a" , UpperCamelCase__ = True ):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
UpperCAmelCase__ : List[str] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
UpperCAmelCase__ : Tuple = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase__ )()(UpperCamelCase__ )
else:
UpperCAmelCase__ : Tuple = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase__ )
elif tokenizer == "moses":
UpperCAmelCase__ : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ , escape=UpperCamelCase__ )
elif tokenizer == "penn":
UpperCAmelCase__ : Dict = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ )
else:
UpperCAmelCase__ : List[Any] = sentence
if not return_str:
UpperCAmelCase__ : List[str] = normalized_sent.split()
return normalized_sent
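# Usage sketch for the normalizer above (the upstream name is assumed to be
# ``normalize``; exact tokens depend on the installed sacrebleu version): with the
# default lowercase + "13a" settings,
#     normalize("About 95 species are currently accepted.")
# returns "about 95 species are currently accepted ." -- punctuation is split off
# so SARI can operate on space-separated tokens.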
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if not (len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == len(UpperCamelCase__ )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
UpperCAmelCase__ : Optional[int] = 0
for src, pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
sari_score += SARIsent(normalize(UpperCamelCase__ ) , normalize(UpperCamelCase__ ) , [normalize(UpperCamelCase__ ) for sent in refs] )
UpperCAmelCase__ : Optional[int] = sari_score / len(UpperCamelCase__ )
return 1_0_0 * sari_score
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="exp" , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
UpperCAmelCase__ : int = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
UpperCAmelCase__ : int = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
UpperCAmelCase__ : int = sacrebleu.corpus_bleu(
UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def snake_case__ ( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = {}
result.update({"""sari""": compute_sari(sources=_lowerCamelCase , predictions=_lowerCamelCase , references=_lowerCamelCase)})
result.update({"""sacrebleu""": compute_sacrebleu(predictions=_lowerCamelCase , references=_lowerCamelCase)})
result.update({"""exact""": compute_em(predictions=_lowerCamelCase , references=_lowerCamelCase)})
        return result
| 283 | 0 |
"""simple docstring"""
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        # Three FIFO queues, one per priority level; index 0 is the highest priority.
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError('''Maximum queue size is 100''')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('''Valid priorities are 0, 1, and 2''')
    def dequeue(self) -> int:
        # Serve the highest-priority non-empty queue first, FIFO within a level.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('''All queues are empty''')
    def __str__(self) -> str:
        return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self):
        self.queue = []
    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError('''Maximum queue size is 100''')
        self.queue.append(data)
    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError('''The queue is empty''')
        # The smallest element has the highest priority.
        data = min(self.queue)
        self.queue.remove(data)
        return data
    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    """Exercise FixedPriorityQueue with a mix of priorities."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    """Exercise ElementPriorityQueue; dequeue always returns the minimum element."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 91 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ : int = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}' )
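# The helper above normalizes three accepted layouts to ``List[List[image]]``:
#     make_batched(img)            # single image      -> [[img]]
#     make_batched([f1, f2])       # single video      -> [[f1, f2]]
#     make_batched([[f1], [f2]])   # batch of videos   -> [[f1], [f2]]
# (``img``/``f1``/``f2`` stand for any valid image object.)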
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["pixel_values"]
def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ):
'''simple docstring'''
super().__init__(**lowercase_)
SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256}
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''')
SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE_ : List[Any] = size
SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = crop_size
SCREAMING_SNAKE_CASE_ : List[Any] = resample
SCREAMING_SNAKE_CASE_ : List[str] = do_rescale
SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor
SCREAMING_SNAKE_CASE_ : List[Any] = offset
SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_)
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa)
if offset:
SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2)
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
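    # Note: the ``offset`` branch applies an additive shift before rescaling; because
    # the shift only makes sense together with rescaling, ``preprocess`` below raises
    # if ``offset`` is set while ``do_rescale`` is False.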
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ):
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''')
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_)
if do_resize:
SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_)
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_)
if do_rescale:
SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_)
if do_normalize:
SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_)
return image
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset
SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''')
if not valid_images(lowercase_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[
self._preprocess_image(
image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , )
for img in video
]
for video in videos
]
SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
| 91 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''vit_mae'''
def __init__( self : List[str] , lowercase_ : Dict=768 , lowercase_ : Any=12 , lowercase_ : Dict=12 , lowercase_ : Dict=3072 , lowercase_ : List[str]="gelu" , lowercase_ : Union[str, Any]=0.0 , lowercase_ : int=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : str=1e-1_2 , lowercase_ : Dict=224 , lowercase_ : str=16 , lowercase_ : Tuple=3 , lowercase_ : Optional[Any]=True , lowercase_ : Dict=16 , lowercase_ : Optional[int]=512 , lowercase_ : Any=8 , lowercase_ : Dict=2048 , lowercase_ : List[str]=0.75 , lowercase_ : Union[str, Any]=False , **lowercase_ : int , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = qkv_bias
_UpperCamelCase = decoder_num_attention_heads
_UpperCamelCase = decoder_hidden_size
_UpperCamelCase = decoder_num_hidden_layers
_UpperCamelCase = decoder_intermediate_size
_UpperCamelCase = mask_ratio
_UpperCamelCase = norm_pix_loss
| 63 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = multiprocessing.Manager()
_UpperCamelCase = manager.list()
_UpperCamelCase = multiprocessing.Process(target=a__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
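# The extra second in ``p.join(timeout=timeout + 1)`` gives the child process's own
# SIGALRM-based time limit (see the context manager below) a chance to fire first,
# so "timed out" is normally reported from inside the child; ``p.kill()`` is only a
# fallback for a child stuck in uninterruptible code.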
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCamelCase = shutil.rmtree
_UpperCamelCase = os.rmdir
_UpperCamelCase = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCamelCase = {}
with swallow_io():
with time_limit(a__ ):
exec(a__ , a__ )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(f'failed: {e}' )
# Needed for cleaning up.
_UpperCamelCase = rmtree
_UpperCamelCase = rmdir
_UpperCamelCase = chdir
@contextlib.contextmanager
def lowerCAmelCase__ ( a__ ) ->List[Any]:
'''simple docstring'''
def signal_handler(a__ , a__ ):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , a__ )
signal.signal(signal.SIGALRM , a__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
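# Minimal usage sketch for the time-limit context manager above (the upstream name
# is assumed to be ``time_limit``; SIGALRM only works in the main thread of the
# main interpreter):
#     with time_limit(2.0):
#         exec(untrusted_snippet, {})   # ``untrusted_snippet`` is a placeholder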
@contextlib.contextmanager
def lowerCAmelCase__ ( ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = WriteOnlyStringIO()
with contextlib.redirect_stdout(a__ ):
with contextlib.redirect_stderr(a__ ):
with redirect_stdin(a__ ):
yield
@contextlib.contextmanager
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(a__ ):
yield dirname
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
pass
class _UpperCAmelCase ( io.StringIO ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
raise OSError
def __UpperCAmelCase ( self : str , *lowercase_ : Any , **lowercase_ : Optional[Any]) -> str:
"""simple docstring"""
raise OSError
def __UpperCAmelCase ( self : Union[str, Any] , *lowercase_ : Optional[Any] , **lowercase_ : Optional[Any]) -> str:
"""simple docstring"""
raise OSError
def __UpperCAmelCase ( self : Optional[Any] , *lowercase_ : str , **lowercase_ : List[Any]) -> Union[str, Any]:
"""simple docstring"""
return False
class _UpperCAmelCase ( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
__A = '''stdin'''
@contextlib.contextmanager
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
'''simple docstring'''
if root == ".":
yield
return
_UpperCamelCase = os.getcwd()
os.chdir(a__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(a__ )
def lowerCAmelCase__ ( a__=None ) ->Tuple:
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
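

# A minimal, hedged demo of the sandbox helpers above (added for illustration, not part of the
# original metric). It exercises only `time_limit`/`TimeoutException`; `check_correctness`, whose
# tail appears at the top of this snippet, composes them via a subprocess. SIGALRM is POSIX-only.
if __name__ == "__main__":
    try:
        with time_limit(0.1):
            while True:  # busy loop; the ITIMER_REAL alarm fires after ~0.1 s
                pass
    except TimeoutException:
        print("timed out, as expected")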
| 63 | 1 |
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
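
    # Hedged demo calls (added for illustration; the function name above is a restoration):
    print(is_automorphic_number(25))  # True: 25**2 = 625 ends in "25"
    print(is_automorphic_number(7))   # False: 7**2 = 49 does not end in "7"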
| 202 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1_000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
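

# Hedged end-to-end sketch of the processor exercised above (added for illustration; the
# checkpoint must be reachable locally or on the Hub for this to run):
if __name__ == "__main__":
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    text_inputs = processor(text=["a dog barking"], return_tensors="np")
    print(sorted(text_inputs.keys()))  # tokenizer outputs, e.g. attention_mask / input_ids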
| 202 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase_ : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase_ : List[str] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _snake_case ( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase_ : List[Any] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowercase_ : Tuple = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
lowercase_ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
lowercase_ : Union[str, Any] = tempfile.mkdtemp()
lowercase_ : Any = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
lowercase_ : str = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
lowercase_ : List[Any] = tempfile.mkdtemp()
lowercase_ : Dict = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase_ : Union[str, Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def _snake_case ( self ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name )
lowercase_ : Optional[Any] = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = pickle.dumps(__SCREAMING_SNAKE_CASE )
pickle.loads(__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : Dict = self.get_rust_tokenizer()
lowercase_ : List[str] = '''I was born in 92000, and this is falsé.'''
lowercase_ : List[str] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : str = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = self.get_rust_tokenizer()
lowercase_ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = '''Hello World!'''
lowercase_ : Optional[Any] = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowercase_ : Tuple = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
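

# Hedged sketch of the slow/fast parity the tests above verify (added for illustration;
# downloading the checkpoint from the Hub is assumed to work):
if __name__ == "__main__":
    slow = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
    text = "I was born in 92000, and this is falsé."
    assert slow.tokenize(text) == fast.tokenize(text)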
| 264 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50_432, hidden_size=6_144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24_576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10_000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
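

# Hedged sketch of the rope_scaling contract enforced above (added for illustration):
if __name__ == "__main__":
    ok = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    try:
        GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be > 1.0
    except ValueError as e:
        print(e)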
| 264 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
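

# Hedged shape sketch (added for illustration): the ONNX axes declared above expect an NCHW
# float tensor; with the 224x224 defaults a matching dummy input looks like this:
if __name__ == "__main__":
    import numpy as np

    pixel_values = np.zeros((1, 3, 224, 224), dtype=np.float32)
    print(pixel_values.shape)  # (batch, num_channels, height, width)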
| 175 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra dense head for answer-category classification."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        """One-hot cross-entropy, optionally reduced (e.g. with ``jnp.mean``)."""
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
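

# Hedged numeric sketch of the one-hot cross-entropy above (added for illustration):
if __name__ == "__main__":
    logits = jnp.array([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])  # (batch=2, classes=3)
    labels = jnp.array([0, 1])
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    loss = -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    print(loss.mean())  # mean cross-entropy, mirroring reduction=jnp.mean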
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10_500
    block_size: int = 128
    batch_size_per_device: int = 3  # field name restored by inference; value from the original
    seed: int = 1  # field name restored by inference; value from the original
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : Tuple = model.params
lowercase : Any = TrainState.create(
apply_fn=model.__call__ ,params=snake_case ,tx=snake_case ,loss_fn=snake_case ,)
if ckpt_dir is not None:
lowercase , lowercase , lowercase , lowercase , lowercase : Tuple = restore_checkpoint(snake_case ,snake_case )
lowercase : List[str] = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
lowercase , lowercase : Tuple = build_tx(**snake_case )
lowercase : str = train_state.TrainState(
step=snake_case ,apply_fn=model.__call__ ,params=snake_case ,tx=snake_case ,opt_state=snake_case ,)
lowercase : Any = args
lowercase : Optional[Any] = data_collator
lowercase : List[str] = lr
lowercase : str = params
lowercase : Tuple = jax_utils.replicate(snake_case )
return state
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Dict = self.args
lowercase : Optional[Any] = len(snake_case ) // args.batch_size
lowercase : int = jax.random.PRNGKey(0 )
lowercase : List[str] = jax.random.split(snake_case ,jax.device_count() )
for epoch in range(args.max_epochs ):
lowercase : List[Any] = jnp.array(0 ,dtype=jnp.floataa )
lowercase : List[str] = get_batched_dataset(snake_case ,args.batch_size ,seed=snake_case )
lowercase : int = 0
for batch in tqdm(snake_case ,total=snake_case ,desc=f"Running EPOCH-{epoch}" ):
lowercase : Dict = self.data_collator(snake_case )
lowercase , lowercase , lowercase : Optional[int] = self.train_step_fn(snake_case ,snake_case ,**snake_case )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
lowercase : Optional[Any] = jax_utils.unreplicate(state.step )
lowercase : List[str] = running_loss.item() / i
lowercase : List[str] = self.scheduler_fn(state_step - 1 )
lowercase : int = self.evaluate(snake_case ,snake_case )
lowercase : Tuple = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(snake_case ) )
self.logger.log(snake_case ,commit=snake_case )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}" ,state=snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : List[str] = get_batched_dataset(snake_case ,self.args.batch_size )
lowercase : Any = len(snake_case ) // self.args.batch_size
lowercase : List[Any] = jnp.array(0 ,dtype=jnp.floataa )
lowercase : Optional[int] = 0
for batch in tqdm(snake_case ,total=snake_case ,desc="""Evaluating ... """ ):
lowercase : Tuple = self.data_collator(snake_case )
lowercase : Optional[int] = self.val_step_fn(snake_case ,**snake_case )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : str = jax_utils.unreplicate(snake_case )
print(f"SAVING CHECKPOINT IN {save_dir}" ,end=""" ... """ )
self.model_save_fn(snake_case ,params=state.params )
with open(os.path.join(snake_case ,"""opt_state.msgpack""" ) ,"""wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args ,os.path.join(snake_case ,"""args.joblib""" ) )
joblib.dump(self.data_collator ,os.path.join(snake_case ,"""data_collator.joblib""" ) )
with open(os.path.join(snake_case ,"""training_state.json""" ) ,"""w""" ) as f:
json.dump({"""step""": state.step.item()} ,snake_case )
print("""DONE""" )
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
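

# Hedged demo (added for illustration): evaluating the warmup-then-linear-decay schedule built
# above at a few steps, with illustrative hyperparameters rather than the training run's values:
if __name__ == "__main__":
    schedule = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1_000)
    for step in (0, 50, 100, 500):
        print(step, schedule(step))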
| 20 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
a__ =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
a__ =True
a__ =False
a__ =False
a__ =False
a__ =False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(A )
_UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Any = [*signature.parameters.keys()]
_UpperCAmelCase : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(A , A , A ):
_UpperCAmelCase : Optional[int] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : str = model(**self._prepare_for_class(A , A ) )
_UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase : List[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[int] = True
check_hidden_states_output(A , A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[int] = ConvNextModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0_260, -0.4_739, 0.1_911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 350 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
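

# Hedged usage sketch of move_added_token outside the test harness (added for illustration;
# downloading the pretrained vocab from the Hub is assumed to work):
if __name__ == "__main__":
    tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
    tokenizer.add_tokens(["new1"])
    tokenizer.move_added_token("new1", 1)
    print(tokenizer.encode("new1"))  # [1]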
| 68 | 0 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Simple streamer that prints the generated text to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints words to stdout as soon as they are complete."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: float = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
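

# Hedged usage sketch (added for illustration): consuming the iterator streamer above while a
# model generates in a background thread. Loading "gpt2" from the Hub is assumed to work.
if __name__ == "__main__":
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok(["A short story:"], return_tensors="pt")
    streamer = TextIteratorStreamer(tok, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
    thread.start()
    for text_chunk in streamer:
        print(text_chunk, end="")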
| 108 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
@property
def __lowercase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
a__ : List[str] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
])
if self.use_past:
a__ : Optional[Any] = {0: 'batch'}
else:
a__ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction='inputs')
return common_inputs
def __lowercase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 2_2050 , lowercase = 5.0 , lowercase = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = OrderedDict()
a__ : int = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase , framework=lowercase , sampling_rate=lowercase , time_duration=lowercase , frequency=lowercase , )
a__ : List[Any] = encoder_inputs['input_features'].shape[2]
a__ : Optional[int] = encoder_sequence_length // 2 if self.use_past else seq_length
a__ : Any = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase , lowercase , lowercase , lowercase)
a__ : List[str] = encoder_inputs.pop('input_features')
a__ : Optional[int] = decoder_inputs.pop('decoder_input_ids')
if "past_key_values" in decoder_inputs:
a__ : List[str] = decoder_inputs.pop('past_key_values')
return dummy_inputs
@property
def __lowercase ( self) -> float:
'''simple docstring'''
return 1e-3
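The two classes above follow the transformers configuration pattern: constructor arguments are stored as attributes, and an ONNX export config derives dummy inputs and validation tolerances from them. A minimal, runnable sketch of the attribute-storing half of that pattern (a hypothetical stand-in class, not the actual `WhisperConfig`):

# Minimal sketch of the configuration pattern above (hypothetical
# `TinySpeechConfig`, not the actual transformers `WhisperConfig`).
class TinySpeechConfig:
    def __init__(self, vocab_size=51865, num_mel_bins=80, d_model=256,
                 max_source_positions=1500, max_target_positions=448):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

config = TinySpeechConfig(d_model=384)
assert config.d_model == 384 and config.num_mel_bins == 80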
| 99 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : List[str] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
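The module above follows transformers' lazy-import layout, though the renaming has dropped the per-branch updates to a shared import dict and the final `sys.modules` assignment. A hedged sketch of the canonical pattern as it would appear in a package `__init__.py` (`_LazyModule` and the availability helpers are real transformers utilities; the XLNet entries are taken from the code above):

# Sketch of the canonical lazy-import pattern (meant for a package __init__.py).
import sys
from transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = ["XLNetModel", "XLNetLMHeadModel"]

# Replace the module with a lazy proxy; submodules are imported on first access.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)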
| 357 | '''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ):
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ):
lowercase_ : Optional[Any] = {}
lowercase_ : Tuple = {}
if prompt is not None:
lowercase_ : Tuple = prompt
if generate_kwargs is not None:
lowercase_ : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase_ : List[Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase_ : str = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ):
return super().__call__(lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ):
lowercase_ : List[Any] = load_image(lowercase_ )
if prompt is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase_ : List[Any] = self.model.config.model_type
if model_type == "git":
lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework )
lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids
lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids
lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework )
lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase_ : str = None
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ):
        # The Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None` values, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowercase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase_ : Any = None
if generate_kwargs is None:
lowercase_ : Optional[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase_ : Dict = model_inputs.pop(self.model.main_input_name )
lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ):
lowercase_ : List[str] = []
for output_ids in model_outputs:
lowercase_ : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , )
}
records.append(lowercase_ )
return records
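A hedged usage sketch for the image-to-text pipeline defined above, assuming it is registered under the "image-to-text" task as in transformers (the checkpoint name is a public example model, and the printed caption is illustrative):

from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
result = captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
print(result)  # e.g. [{'generated_text': 'two cats laying on a couch'}]

# Conditional generation with a text prompt (supported by e.g. GIT and Pix2Struct):
# captioner(image, prompt="a photography of", max_new_tokens=20)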
| 21 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Optional[Any] = UnCLIPImageVariationPipeline
__lowerCAmelCase : Optional[Any] = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
__lowerCAmelCase : Tuple = IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase : Optional[int] = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
__lowerCAmelCase : Any = False
@property
def __lowerCamelCase ( self :Optional[int] ):
return 3_2
@property
def __lowerCamelCase ( self :List[Any] ):
return 3_2
@property
def __lowerCamelCase ( self :List[str] ):
return self.time_input_dim
@property
def __lowerCamelCase ( self :List[str] ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self :List[str] ):
return 1_0_0
@property
def __lowerCamelCase ( self :Tuple ):
snake_case__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCamelCase ( self :Any ):
torch.manual_seed(0 )
snake_case__ : List[str] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(__lowercase )
@property
def __lowerCamelCase ( self :Dict ):
torch.manual_seed(0 )
snake_case__ : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=3_2 ,intermediate_size=3_7 ,patch_size=1 ,)
return CLIPVisionModelWithProjection(__lowercase )
@property
def __lowerCamelCase ( self :str ):
torch.manual_seed(0 )
snake_case__ : List[str] = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case__ : Any = UnCLIPTextProjModel(**__lowercase )
return model
@property
def __lowerCamelCase ( self :Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ : Any = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case__ : int = UNetaDConditionModel(**__lowercase )
return model
@property
def __lowerCamelCase ( self :List[str] ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __lowerCamelCase ( self :str ):
torch.manual_seed(0 )
snake_case__ : Dict = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __lowerCamelCase ( self :Tuple ):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case__ : str = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Dict = self.dummy_decoder
snake_case__ : List[str] = self.dummy_text_proj
snake_case__ : List[str] = self.dummy_text_encoder
snake_case__ : Optional[Any] = self.dummy_tokenizer
snake_case__ : List[str] = self.dummy_super_res_first
snake_case__ : str = self.dummy_super_res_last
snake_case__ : Optional[int] = UnCLIPScheduler(
variance_type='''learned_range''' ,prediction_type='''epsilon''' ,num_train_timesteps=1_0_0_0 ,)
snake_case__ : Dict = UnCLIPScheduler(
variance_type='''fixed_small_log''' ,prediction_type='''epsilon''' ,num_train_timesteps=1_0_0_0 ,)
snake_case__ : str = CLIPImageProcessor(crop_size=3_2 ,size=3_2 )
snake_case__ : Optional[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __lowerCamelCase ( self :Optional[int] ,__lowercase :List[Any] ,__lowercase :int=0 ,__lowercase :Union[str, Any]=True ):
snake_case__ : Optional[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
snake_case__ : Optional[int] = torch.manual_seed(__lowercase )
else:
snake_case__ : List[str] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
if pil_image:
snake_case__ : str = input_image * 0.5 + 0.5
snake_case__ : Tuple = input_image.clamp(0 ,1 )
snake_case__ : Dict = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
snake_case__ : Optional[Any] = DiffusionPipeline.numpy_to_pil(__lowercase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Union[str, Any] = '''cpu'''
snake_case__ : Tuple = self.get_dummy_components()
snake_case__ : Optional[int] = self.pipeline_class(**__lowercase )
snake_case__ : List[str] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : str = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
snake_case__ : List[str] = pipe(**__lowercase )
snake_case__ : Optional[int] = output.images
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
snake_case__ : str = pipe(
**__lowercase ,return_dict=__lowercase ,)[0]
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Optional[Any] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :int ):
snake_case__ : Dict = '''cpu'''
snake_case__ : int = self.get_dummy_components()
snake_case__ : int = self.pipeline_class(**__lowercase )
snake_case__ : Any = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Optional[int] = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
snake_case__ : Tuple = pipe(**__lowercase )
snake_case__ : Union[str, Any] = output.images
snake_case__ : List[str] = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
snake_case__ : str = pipe(
**__lowercase ,return_dict=__lowercase ,)[0]
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Union[str, Any] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :str ):
snake_case__ : Optional[int] = '''cpu'''
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : Tuple = self.pipeline_class(**__lowercase )
snake_case__ : Any = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : int = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
snake_case__ : List[str] = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case__ : Tuple = pipe(**__lowercase )
snake_case__ : Dict = output.images
snake_case__ : List[str] = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
snake_case__ : int = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case__ : int = pipe(
**__lowercase ,return_dict=__lowercase ,)[0]
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
snake_case__ : List[str] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[int] = torch.device('''cpu''' )
class a :
__lowerCAmelCase : Union[str, Any] = 1
snake_case__ : int = self.get_dummy_components()
snake_case__ : Tuple = self.pipeline_class(**__lowercase )
snake_case__ : Optional[int] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Union[str, Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
snake_case__ : int = pipe.decoder.dtype
snake_case__ : List[str] = 1
snake_case__ : Union[str, Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case__ : int = pipe.prepare_latents(
__lowercase ,dtype=__lowercase ,device=__lowercase ,generator=__lowercase ,latents=__lowercase ,scheduler=DummyScheduler() )
snake_case__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case__ : Union[str, Any] = pipe.prepare_latents(
__lowercase ,dtype=__lowercase ,device=__lowercase ,generator=__lowercase ,latents=__lowercase ,scheduler=DummyScheduler() )
snake_case__ : List[Any] = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
snake_case__ : Dict = pipe(
**__lowercase ,decoder_latents=__lowercase ,super_res_latents=__lowercase ).images
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__lowercase ,pil_image=__lowercase )
        # Don't pass the image; pass the image embedding instead
snake_case__ : int = pipeline_inputs.pop('''image''' )
snake_case__ : List[Any] = pipe.image_encoder(__lowercase ).image_embeds
snake_case__ : List[str] = pipe(
**__lowercase ,decoder_latents=__lowercase ,super_res_latents=__lowercase ,image_embeddings=__lowercase ,).images
        # make sure that passing the image embeddings manually yields an identical result
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def __lowerCamelCase ( self :Dict ):
snake_case__ : Any = torch_device == '''cpu'''
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
snake_case__ : Dict = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase ,expected_max_diff=__lowercase )
@skip_mps
def __lowerCamelCase ( self :str ):
snake_case__ : Optional[Any] = torch_device == '''cpu'''
snake_case__ : List[str] = True
snake_case__ : Optional[int] = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__lowercase ,relax_max_difference=__lowercase ,additional_params_copy_to_batched_inputs=__lowercase ,)
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Any = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case__ : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__lowercase ,additional_params_copy_to_batched_inputs=__lowercase ,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__lowercase )
@skip_mps
def __lowerCamelCase ( self :Tuple ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowerCamelCase ( self :str ):
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self :List[Any] ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case__ : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' ,torch_dtype=torch.floataa )
snake_case__ : Optional[int] = pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
snake_case__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case__ : List[str] = pipeline(
__lowercase ,generator=__lowercase ,output_type='''np''' ,)
snake_case__ : str = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(__lowercase ,__lowercase ,1_5 )
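The tests above repeatedly use the same idiom: run the pipeline twice (once returning a dataclass, once a plain tuple), take a 3x3 corner slice of the output image, and compare it against hard-coded reference values. A minimal, self-contained sketch of that comparison (the arrays here are stand-ins, not real pipeline outputs):

import numpy as np

# Stand-in for `pipe(**inputs).images`: one 64x64 RGB image with values in [0, 1].
image = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)
image_slice = image[0, -3:, -3:, -1]                 # last channel, bottom-right 3x3 corner
expected_slice = np.full(9, 0.5, dtype=np.float32)   # stored reference values
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2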
| 230 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class a ( unittest.TestCase ):
__lowerCAmelCase : Any = MODEL_FOR_MASKED_LM_MAPPING
__lowerCAmelCase : Optional[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowerCamelCase ( self :str ):
super().tearDown()
        # clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowerCamelCase ( self :Any ):
snake_case__ : Optional[Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''tf''' )
snake_case__ : int = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__lowercase ,decimals=6 ) ,[
{'''sequence''': '''My name is grouped''', '''score''': 2.1e-0_5, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1e-0_5, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''},
] ,)
snake_case__ : int = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__lowercase ,decimals=6 ) ,[
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1e-0_5,
'''token''': 3_8_0_1_5,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1e-0_5,
'''token''': 2_5_5_0_6,
'''token_str''': ''' accuser''',
},
] ,)
snake_case__ : Optional[int] = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 )
self.assertEqual(
nested_simplify(__lowercase ,decimals=6 ) ,[
{'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] ,)
@require_torch
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : str = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''pt''' )
snake_case__ : str = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__lowercase ,decimals=6 ) ,[
{'''sequence''': '''My name is Maul''', '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] ,)
snake_case__ : List[str] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__lowercase ,decimals=6 ) ,[
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2e-0_5,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] ,)
snake_case__ : Union[str, Any] = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 )
self.assertEqual(
nested_simplify(__lowercase ,decimals=6 ) ,[
{'''sequence''': '''My name is Patrick''', '''score''': 2.1e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
] ,)
snake_case__ : Optional[int] = unmasker('''My name is <mask> <mask>''' ,top_k=2 )
self.assertEqual(
nested_simplify(__lowercase ,decimals=6 ) ,[
[
{
'''score''': 2.2e-0_5,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2e-0_5,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] ,)
@require_torch_gpu
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[int] = pipeline('''fill-mask''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,device=0 ,framework='''pt''' )
# convert model to fp16
pipe.model.half()
snake_case__ : List[str] = pipe('''Paris is the [MASK] of France.''' )
        # We don't actually care about the result; we just want to make sure
        # it works, i.e. that the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(__lowercase ,__lowercase )
@slow
@require_torch
def __lowerCamelCase ( self :str ):
snake_case__ : List[str] = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''pt''' )
self.run_large_test(__lowercase )
@slow
@require_tf
def __lowerCamelCase ( self :Any ):
snake_case__ : Optional[Any] = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''tf''' )
self.run_large_test(__lowercase )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ):
snake_case__ : Optional[Any] = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__lowercase ) ,[
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_1_0, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''},
] ,)
snake_case__ : str = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__lowercase ) ,[
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 2_2_0_1,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 1_2_7_9_0,
'''token_str''': ''' Lyon''',
},
] ,)
snake_case__ : Dict = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 )
self.assertEqual(
nested_simplify(__lowercase ) ,[
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] ,)
@require_torch
def __lowerCamelCase ( self :List[str] ):
snake_case__ : List[Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''pt''' )
snake_case__ : str = None
snake_case__ : int = None
self.run_pipeline_test(__lowercase ,[] )
@require_tf
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[int] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''tf''' )
snake_case__ : int = None
snake_case__ : List[str] = None
self.run_pipeline_test(__lowercase ,[] )
def __lowerCamelCase ( self :Any ,__lowercase :Any ,__lowercase :str ,__lowercase :Union[str, Any] ):
if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('''The provided tokenizer has no mask token (probably reformer or wav2vec2)''' )
snake_case__ : Optional[int] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase )
snake_case__ : List[str] = [
F"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ):
snake_case__ : List[str] = fill_masker.tokenizer
snake_case__ : List[Any] = fill_masker.model
snake_case__ : Dict = fill_masker(
F"""This is a {tokenizer.mask_token}""" ,)
self.assertEqual(
__lowercase ,[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
] ,)
snake_case__ : Tuple = fill_masker([F"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
__lowercase ,[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
] ,)
snake_case__ : List[str] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
__lowercase ,[
[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
],
[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
],
] ,)
with self.assertRaises(__lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__lowercase ):
fill_masker('''This is''' )
self.run_test_top_k(__lowercase ,__lowercase )
self.run_test_targets(__lowercase ,__lowercase )
self.run_test_top_k_targets(__lowercase ,__lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(__lowercase ,__lowercase )
self.fill_mask_with_multiple_masks(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :int ):
snake_case__ : int = tokenizer.get_vocab()
snake_case__ : Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case__ : List[Any] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ,targets=__lowercase )
snake_case__ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowercase ,[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
] ,)
snake_case__ : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} ,__lowercase )
snake_case__ : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} ,set(__lowercase ) )
# Call argument
snake_case__ : str = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase )
snake_case__ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase )
self.assertEqual(
__lowercase ,[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
] ,)
snake_case__ : str = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} ,__lowercase )
snake_case__ : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} ,set(__lowercase ) )
# Score equivalence
snake_case__ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase )
snake_case__ : Union[str, Any] = [top_mask['''token_str'''] for top_mask in outputs]
snake_case__ : Tuple = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowercase ) == set(__lowercase ):
snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase )
snake_case__ : int = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) )
# Raises with invalid
with self.assertRaises(__lowercase ):
snake_case__ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__lowercase ):
snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[''''''] )
with self.assertRaises(__lowercase ):
snake_case__ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets='''''' )
def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Dict ):
snake_case__ : int = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ,top_k=2 )
snake_case__ : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowercase ,[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
] ,)
snake_case__ : Any = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase )
snake_case__ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
__lowercase ,[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
] ,)
self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) )
def __lowerCamelCase ( self :List[Any] ,__lowercase :Tuple ,__lowercase :str ):
snake_case__ : Optional[int] = tokenizer.get_vocab()
snake_case__ : int = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase )
# top_k=2, ntargets=3
snake_case__ : int = sorted(vocab.keys() )[:3]
snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 ,targets=__lowercase )
        # If we use the most probable targets and filter differently, we should still
        # have the same results
        snake_case__ : Dict = [el['''token_str'''] for el in sorted(__lowercase ,key=lambda x: x["score"] ,reverse=__lowercase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowercase ).issubset(__lowercase ):
snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=3 ,targets=__lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Dict ,__lowercase :Dict ):
snake_case__ : Union[str, Any] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase )
snake_case__ : str = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case__ : int = sorted(vocab.keys() )[:3]
snake_case__ : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case__ : Optional[Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" ,targets=__lowercase ,top_k=1_0 )
        # The target list contains duplicates, so we can't output more
        # predictions than the number of unique targets
self.assertEqual(len(__lowercase ) ,3 )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ):
snake_case__ : Any = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase )
snake_case__ : Tuple = fill_masker(
F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
__lowercase ,[
[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
],
[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
],
[
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
{'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )},
],
] ,)
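A hedged usage sketch for the fill-mask pipeline exercised above; the checkpoint follows the slow tests, while exact tokens and scores depend on the model version:

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
for pred in unmasker("The largest city in France is <mask>."):
    print(pred["token_str"], round(pred["score"], 3))
# e.g. " Paris" 0.251 and " Lyon" 0.214, as asserted in run_large_test above

# Restricting candidates to an explicit target list:
unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2)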
| 230 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case :
def __init__( self : Optional[Any], __lowercase : Optional[Any], __lowercase : Any=13, __lowercase : Optional[Any]=3, __lowercase : str=True, __lowercase : Union[str, Any]=True, __lowercase : int=0.1, __lowercase : Any=0.1, __lowercase : Optional[int]=224, __lowercase : Tuple=1000, __lowercase : Dict=[3, 3, 6, 4], __lowercase : str=[48, 56, 112, 220], ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = num_labels
lowercase__ = image_size
lowercase__ = layer_depths
lowercase__ = embed_dims
def A__ ( self : Tuple ):
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self : List[Any] ):
return SwiftFormerConfig(
depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=__lowercase, layer_scale_init_value=1e-5, )
def A__ ( self : str, __lowercase : Optional[Any], __lowercase : Tuple, __lowercase : Union[str, Any] ):
lowercase__ = SwiftFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self : Optional[int], __lowercase : Optional[Any], __lowercase : Dict, __lowercase : Dict ):
lowercase__ = self.num_labels
lowercase__ = SwiftFormerForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
lowercase__ = SwiftFormerForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = model(__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] ):
((lowercase__) , (lowercase__) , (lowercase__)) = self.prepare_config_and_inputs()
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
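The tester above follows the common `ModelTester` pattern: `prepare_config_and_inputs` builds a config plus random input tensors, and `prepare_config_and_inputs_for_common` repackages them as a kwargs dict for the shared test mixins. A minimal illustration of the repackaging step (hypothetical stand-in, torch only):

import torch

def prepare_config_and_inputs_for_common(batch_size=13, num_channels=3, image_size=224):
    config = {"num_labels": 1000}  # stand-in for a real SwiftFormerConfig
    pixel_values = torch.rand(batch_size, num_channels, image_size, image_size)
    return config, {"pixel_values": pixel_values}

config, inputs_dict = prepare_config_and_inputs_for_common()
assert inputs_dict["pixel_values"].shape == (13, 3, 224, 224)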
@require_torch
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
UpperCamelCase__ : int =(SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase__ : Optional[int] =(
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ : Dict =False
UpperCamelCase__ : List[Any] =False
UpperCamelCase__ : Optional[Any] =False
UpperCamelCase__ : int =False
UpperCamelCase__ : Union[str, Any] =False
def A__ ( self : Dict ):
lowercase__ = SwiftFormerModelTester(self )
lowercase__ = ConfigTester(
self, config_class=__lowercase, has_text_modality=__lowercase, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, )
def A__ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self : Dict ):
pass
def A__ ( self : Optional[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase, nn.Linear ) )
def A__ ( self : List[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict, so the arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], __lowercase )
def A__ ( self : Tuple ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A__ ( self : List[str] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def A__ ( self : str ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = SwiftFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self : List[str] ):
pass
def A__ ( self : Optional[Any] ):
def check_hidden_states_output(__lowercase : Any, __lowercase : Optional[int], __lowercase : Optional[int] ):
lowercase__ = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
lowercase__ = outputs.hidden_states
lowercase__ = 8
self.assertEqual(len(__lowercase ), __lowercase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowercase ) ):
self.assertEqual(
hidden_states[i].shape, torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ), )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
def A__ ( self : Any ):
def _config_zero_init(__lowercase : List[str] ):
lowercase__ = copy.deepcopy(__lowercase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowercase, __lowercase, 1e-1_0 )
if isinstance(getattr(__lowercase, __lowercase, __lowercase ), __lowercase ):
lowercase__ = _config_zero_init(getattr(__lowercase, __lowercase ) )
setattr(__lowercase, __lowercase, __lowercase )
return configs_no_init
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(__lowercase )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=__lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self : Optional[Any] ):
pass
def __lowerCAmelCase ( ):
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase):
@cached_property
def A__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self : Dict ):
lowercase__ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowercase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=__lowercase, return_tensors="pt" ).to(__lowercase )
# forward pass
with torch.no_grad():
lowercase__ = model(**__lowercase )
# verify the logits
lowercase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, __lowercase )
lowercase__ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __lowercase, atol=1e-4 ) )
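The hidden-state test above encodes SwiftFormer's geometry: eight recorded states, with channels taken from `embed_dims[i // 2]` and the spatial side starting at `image_size // 4` and halving after every two blocks. A worked check of that arithmetic under the tester's defaults (`image_size=224`, `embed_dims=[48, 56, 112, 220]`):

image_size = 224
embed_dims = [48, 56, 112, 220]
for i in range(8):
    side = (image_size // 4) // 2 ** (i // 2)
    print(f"hidden_states[{i}]: channels={embed_dims[i // 2]}, spatial={side}x{side}")
# -> 56x56, 56x56, 28x28, 28x28, 14x14, 14x14, 7x7, 7x7, matching the
#    final (batch, embed_dims[-1], 7, 7) shape asserted in create_and_check_model.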
| 224 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class _snake_case ( lowercase__):
UpperCamelCase__ : Tuple ="""nllb-moe"""
UpperCamelCase__ : Dict =["""past_key_values"""]
UpperCamelCase__ : List[Any] ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str], __lowercase : str=12_8112, __lowercase : List[Any]=1024, __lowercase : int=12, __lowercase : Union[str, Any]=4096, __lowercase : Union[str, Any]=16, __lowercase : Tuple=12, __lowercase : int=4096, __lowercase : Any=16, __lowercase : List[str]=0.05, __lowercase : Optional[int]=0.05, __lowercase : int=True, __lowercase : Optional[Any]=True, __lowercase : Optional[Any]="relu", __lowercase : str=1024, __lowercase : Optional[Any]=0.1, __lowercase : List[Any]=0.1, __lowercase : Union[str, Any]=0.0, __lowercase : Tuple=0.02, __lowercase : Optional[Any]=2, __lowercase : Optional[int]=True, __lowercase : List[Any]=False, __lowercase : Any="float32", __lowercase : Union[str, Any]=False, __lowercase : Optional[int]=128, __lowercase : Union[str, Any]=64, __lowercase : Optional[int]=4, __lowercase : Dict=4, __lowercase : Any=0.001, __lowercase : Optional[int]=0.001, __lowercase : int="all", __lowercase : Any=False, __lowercase : Optional[int]=False, __lowercase : Dict=1.0, __lowercase : int=0.2, __lowercase : str=1, __lowercase : Optional[Any]=0, __lowercase : Optional[int]=2, __lowercase : Any=False, **__lowercase : List[str], ):
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = router_z_loss_coef
lowercase__ = router_aux_loss_coef
lowercase__ = decoder_sparse_step
lowercase__ = encoder_sparse_step
lowercase__ = num_experts
lowercase__ = expert_capacity
lowercase__ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowercase__ = router_dtype
lowercase__ = router_ignore_padding_tokens
lowercase__ = batch_prioritized_routing
lowercase__ = second_expert_policy
lowercase__ = normalize_router_prob_before_dropping
lowercase__ = moe_eval_capacity_token_fraction
lowercase__ = moe_token_dropout
lowercase__ = output_router_logits
super().__init__(
pad_token_id=__lowercase, bos_token_id=__lowercase, eos_token_id=__lowercase, is_encoder_decoder=__lowercase, decoder_start_token_id=__lowercase, **__lowercase, )
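A minimal sketch of the `router_dtype` validation performed in the constructor above (a simplified stand-in function, not the actual `NllbMoeConfig`):

def check_router_dtype(router_dtype: str) -> str:
    # Mirrors the constructor check: only these three dtypes are accepted.
    if router_dtype not in ("float32", "float16", "bfloat16"):
        raise ValueError(
            f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}"
        )
    return router_dtype

assert check_router_dtype("bfloat16") == "bfloat16"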
| 224 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="resnet50" , UpperCAmelCase=3 , UpperCAmelCase=3_2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , ) -> str:
_lowercase =parent
_lowercase =out_indices if out_indices is not None else [4]
_lowercase =stage_names
_lowercase =out_features
_lowercase =backbone
_lowercase =batch_size
_lowercase =image_size
_lowercase =num_channels
_lowercase =use_pretrained_backbone
_lowercase =is_training
def __A (self ) -> Tuple:
_lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase =self.get_config()
return config, pixel_values
def __A (self ) -> Optional[Any]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __A (self , UpperCAmelCase , UpperCAmelCase ) -> int:
_lowercase =TimmBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(UpperCAmelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def __A (self ) -> List[str]:
_lowercase =self.prepare_config_and_inputs()
_lowercase , _lowercase =config_and_inputs
_lowercase ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
def __A (self ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __A (self ) -> Optional[Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __A (self ) -> List[Any]:
pass
    @unittest.skip('''TimmBackbone models don't have inputs_embeds''' )
    def __A (self ) -> Tuple:
        pass
    @unittest.skip('''TimmBackbone models don't have inputs_embeds''' )
def __A (self ) -> Any:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __A (self ) -> Any:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A (self ) -> Optional[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __A (self ) -> int:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __A (self ) -> int:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __A (self ) -> List[str]:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __A (self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A (self ) -> List[Any]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 5 |
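The test row above checks that a timm-backed backbone and its native transformers counterpart expose the same backbone API. A minimal usage sketch (assuming `transformers` and `timm` are installed and the checkpoints are reachable):

```python
from transformers import AutoBackbone

# timm checkpoint loaded through the TimmBackbone wrapper
timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
# native transformers checkpoint for the same architecture
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])

# Both expose the same backbone API: selected stages and channel counts.
print(timm_backbone.out_indices, hf_backbone.out_indices)
print(timm_backbone.channels, hf_backbone.channels)
```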
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 1 |
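The `_LazyModule` pattern above registers an import map and defers the real imports until an attribute is first accessed. A stripped-down sketch of the idea (a toy `LazyModule`, not the transformers implementation):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Toy version: resolve attributes to submodule imports on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported name -> submodule that defines it
        self._object_to_module = {
            obj: submodule for submodule, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```

Replacing `sys.modules[__name__]` with such an object is what lets `from transformers.onnx import OnnxConfig` pay the import cost only when `OnnxConfig` is actually touched.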
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 357 |
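The try/except blocks above gate each group of exports on an optional dependency. The same guard pattern, reduced to its core (the `OptionalDependencyNotAvailable` class here is a stand-in for the transformers exception of the same name):

```python
class OptionalDependencyNotAvailable(BaseException):
    """Stand-in for the transformers exception of the same name."""

def is_torch_available():
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    exports = []                  # torch missing: expose nothing (or dummy objects)
else:
    exports = ["ConvNextModel"]   # torch present: expose the real classes
```

Raising and immediately catching a dedicated exception keeps the "dependency missing" branch in one place even when several availability checks are combined.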
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 80 | 0 |
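The script above is a CLI wrapper around `download_from_original_stable_diffusion_ckpt`. Called programmatically with the same defaults, the core conversion reduces to something like this (the paths are placeholders; the keyword names are taken from the call in the script):

```python
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

# Placeholder paths; substitute a real checkpoint and output directory.
pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.ckpt",
    original_config_file=None,   # inferred when None, as in the CLI default
    scheduler_type="pndm",
    extract_ema=False,
)
pipe.save_pretrained("converted-sd-pipeline", safe_serialization=False)
```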
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self):
'''simple docstring'''
return 12
@property
    def num_embeds_ada_norm(self):
'''simple docstring'''
return 12
@property
    def text_embedder_hidden_size(self):
'''simple docstring'''
return 32
@property
    def dummy_vqvae(self):
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[str] =VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
'''simple docstring'''
A__ : Optional[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
A__ : Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(snake_case_ )
@property
    def dummy_transformer(self):
'''simple docstring'''
torch.manual_seed(0 )
A__ : Optional[int] =12
A__ : Union[str, Any] =12
A__ : Optional[int] ={
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
        A__ : str =Transformer2DModel(**snake_case_ )
return model
    def test_vq_diffusion(self):
'''simple docstring'''
A__ : Tuple ="""cpu"""
A__ : Optional[Any] =self.dummy_vqvae
A__ : Tuple =self.dummy_text_encoder
A__ : List[str] =self.dummy_tokenizer
A__ : Optional[int] =self.dummy_transformer
A__ : Optional[Any] =VQDiffusionScheduler(self.num_embed )
A__ : int =LearnedClassifierFreeSamplingEmbeddings(learnable=snake_case_ )
A__ : Union[str, Any] =VQDiffusionPipeline(
vqvae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , transformer=snake_case_ , scheduler=snake_case_ , learned_classifier_free_sampling_embeddings=snake_case_ , )
A__ : Optional[int] =pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
A__ : Any ="""teddy bear playing in the pool"""
A__ : List[str] =torch.Generator(device=snake_case_ ).manual_seed(0 )
A__ : str =pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="""np""" )
A__ : Optional[Any] =output.images
A__ : List[Any] =torch.Generator(device=snake_case_ ).manual_seed(0 )
A__ : Dict =pipe(
[prompt] , generator=snake_case_ , output_type="""np""" , return_dict=snake_case_ , num_inference_steps=2 )[0]
A__ : Tuple =image[0, -3:, -3:, -1]
A__ : List[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
A__ : Tuple =np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
'''simple docstring'''
A__ : str ="""cpu"""
A__ : List[Any] =self.dummy_vqvae
A__ : Optional[int] =self.dummy_text_encoder
A__ : Union[str, Any] =self.dummy_tokenizer
A__ : Optional[int] =self.dummy_transformer
A__ : List[str] =VQDiffusionScheduler(self.num_embed )
A__ : Optional[int] =LearnedClassifierFreeSamplingEmbeddings(
learnable=snake_case_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
A__ : Any =VQDiffusionPipeline(
vqvae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , transformer=snake_case_ , scheduler=snake_case_ , learned_classifier_free_sampling_embeddings=snake_case_ , )
A__ : Optional[int] =pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
A__ : Optional[int] ="""teddy bear playing in the pool"""
A__ : List[Any] =torch.Generator(device=snake_case_ ).manual_seed(0 )
A__ : Optional[int] =pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="""np""" )
A__ : Any =output.images
A__ : Optional[int] =torch.Generator(device=snake_case_ ).manual_seed(0 )
A__ : Any =pipe(
[prompt] , generator=snake_case_ , output_type="""np""" , return_dict=snake_case_ , num_inference_steps=2 )[0]
A__ : List[Any] =image[0, -3:, -3:, -1]
A__ : List[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
A__ : Dict =np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
'''simple docstring'''
A__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
A__ : Union[str, Any] =VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
A__ : Tuple =pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
A__ : int =torch.Generator(device=snake_case_ ).manual_seed(0 )
A__ : Any =pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=snake_case_ , output_type="""np""" , )
A__ : List[str] =output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 134 |
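Both tests above use the same regression idiom: generate a tiny image with a seeded generator, then compare a 3x3 corner slice against hard-coded reference values. The idiom in isolation (toy data standing in for pipeline output):

```python
import numpy as np

def check_image_slice(image, expected_slice, tol=1e-2):
    """Compare the bottom-right 3x3 corner of the last channel to a reference."""
    image_slice = image[0, -3:, -3:, -1]
    assert image_slice.shape == expected_slice.shape
    assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < tol

# toy array standing in for pipeline output of shape (batch, height, width, channels)
image = np.zeros((1, 24, 24, 3))
check_image_slice(image, np.zeros((3, 3)))
```

Comparing a small fixed slice instead of the whole image keeps the reference values short while still catching most numerical regressions.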
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309 | 0 |
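With the isort-style helpers above restored, `sort_objects` orders constants first, then classes, then functions, ignoring underscores when comparing names. For example:

```python
names = ["zeta_fn", "AlphaClass", "MY_CONSTANT", "_private_fn", "BetaClass"]
print(sort_objects(names))
# ['MY_CONSTANT', 'AlphaClass', 'BetaClass', '_private_fn', 'zeta_fn']
```

`_private_fn` sorts before `zeta_fn` because `ignore_underscore` compares `"privatefn"` against `"zetafn"`.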
import comet # From: unbabel-comet
import torch
import datasets
__lowerCamelCase : Optional[Any] = datasets.logging.get_logger(__name__)
__lowerCamelCase : Any = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCamelCase : List[str] = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCamelCase : Optional[int] = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        mean_score, scores = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 286 |
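`_compute` above converts a columnar dict of parallel lists into a list of per-example dicts before handing it to the COMET scorer. That one-liner is worth seeing on its own:

```python
data = {
    "src": ["Dem Feuer konnte Einhalt geboten werden"],
    "mt": ["The fire could be stopped"],
    "ref": ["They were able to control the fire."],
}
# zip(*data.values()) walks the columns row by row; zip(data, ...) re-attaches the keys.
rows = [dict(zip(data, t)) for t in zip(*data.values())]
print(rows[0]["src"], "->", rows[0]["mt"])
```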
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
"""simple docstring"""
a_ = "roberta-prelayernorm"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
"""simple docstring"""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 286 | 1 |
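The `inputs` property above tells the ONNX exporter which tensor dimensions are dynamic. Evaluated by hand for the default (non-multiple-choice) task it is simply:

```python
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
    ]
)
# The exporter keeps axis 0 (batch) and axis 1 (sequence) symbolic,
# so the exported model accepts any batch size and sequence length.
print(onnx_inputs)
```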
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 299 | 0 |
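A quick check of `solution` above on a small exponent: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, and the digit-by-digit loop agrees with a string-based computation for the default exponent:

```python
assert solution(15) == 26  # 2**15 = 32768 -> 3+2+7+6+8
assert solution(1000) == sum(int(d) for d in str(2**1000))
```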
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step: the decoded sample tensor."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Union[str, Any]=("DownEncoderBlock2D",) , UpperCAmelCase_ : int=(64,) , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : Union[str, Any]="silu" , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : Optional[int] = layers_per_block
        __UpperCAmelCase : Any = torch.nn.Conv2d(
UpperCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Any = nn.ModuleList([] )
# down
__UpperCAmelCase : int = block_out_channels[0]
for i, down_block_type in enumerate(UpperCAmelCase_ ):
__UpperCAmelCase : int = output_channel
__UpperCAmelCase : int = block_out_channels[i]
__UpperCAmelCase : Optional[Any] = i == len(UpperCAmelCase_ ) - 1
__UpperCAmelCase : List[Any] = get_down_block(
UpperCAmelCase_ , num_layers=self.layers_per_block , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
self.down_blocks.append(UpperCAmelCase_ )
# mid
        __UpperCAmelCase : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# out
__UpperCAmelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCAmelCase_ , eps=1e-6 )
__UpperCAmelCase : Dict = nn.SiLU()
__UpperCAmelCase : int = 2 * out_channels if double_z else out_channels
        __UpperCAmelCase : List[Any] = nn.Conv2d(block_out_channels[-1] , UpperCAmelCase_ , 3 , padding=1 )
__UpperCAmelCase : List[Any] = False
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = x
__UpperCAmelCase : Dict = self.conv_in(UpperCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : Tuple ):
def custom_forward(*UpperCAmelCase_ : int ):
return module(*UpperCAmelCase_ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
__UpperCAmelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
# middle
__UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
for down_block in self.down_blocks:
__UpperCAmelCase : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ )
# middle
__UpperCAmelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
__UpperCAmelCase : Optional[Any] = down_block(UpperCAmelCase_ )
# middle
__UpperCAmelCase : Any = self.mid_block(UpperCAmelCase_ )
# post-process
__UpperCAmelCase : Union[str, Any] = self.conv_norm_out(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = self.conv_act(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = self.conv_out(UpperCAmelCase_ )
return sample
class Decoder(nn.Module):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=("UpDecoderBlock2D",) , UpperCAmelCase_ : Optional[int]=(64,) , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]="silu" , UpperCAmelCase_ : Union[str, Any]="group" , ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : Dict = layers_per_block
        __UpperCAmelCase : List[str] = nn.Conv2d(
UpperCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = nn.ModuleList([] )
__UpperCAmelCase : str = in_channels if norm_type == "spatial" else None
# mid
        __UpperCAmelCase : List[str] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# up
__UpperCAmelCase : Dict = list(reversed(UpperCAmelCase_ ) )
__UpperCAmelCase : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_ ):
__UpperCAmelCase : Dict = output_channel
__UpperCAmelCase : Dict = reversed_block_out_channels[i]
__UpperCAmelCase : Union[str, Any] = i == len(UpperCAmelCase_ ) - 1
__UpperCAmelCase : Any = get_up_block(
UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , resnet_time_scale_shift=UpperCAmelCase_ , )
self.up_blocks.append(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = output_channel
# out
if norm_type == "spatial":
__UpperCAmelCase : Optional[Any] = SpatialNorm(block_out_channels[0] , UpperCAmelCase_ )
else:
__UpperCAmelCase : Dict = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCAmelCase_ , eps=1e-6 )
__UpperCAmelCase : Optional[int] = nn.SiLU()
        __UpperCAmelCase : Tuple = nn.Conv2d(block_out_channels[0] , UpperCAmelCase_ , 3 , padding=1 )
__UpperCAmelCase : List[Any] = False
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]=None ):
"""simple docstring"""
__UpperCAmelCase : int = z
__UpperCAmelCase : List[Any] = self.conv_in(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : str ):
def custom_forward(*UpperCAmelCase_ : Any ):
return module(*UpperCAmelCase_ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
__UpperCAmelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
__UpperCAmelCase : str = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
__UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
# middle
__UpperCAmelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : Optional[int] = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
__UpperCAmelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# middle
__UpperCAmelCase : Union[str, Any] = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
__UpperCAmelCase : Optional[int] = up_block(UpperCAmelCase_ , UpperCAmelCase_ )
# post-process
if latent_embeds is None:
__UpperCAmelCase : Tuple = self.conv_norm_out(UpperCAmelCase_ )
else:
__UpperCAmelCase : List[Any] = self.conv_norm_out(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : Any = self.conv_act(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = self.conv_out(UpperCAmelCase_ )
return sample
class VectorQuantizer(nn.Module):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str="random" , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[str]=True ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : Tuple = n_e
__UpperCAmelCase : Tuple = vq_embed_dim
__UpperCAmelCase : List[Any] = beta
__UpperCAmelCase : Dict = legacy
__UpperCAmelCase : Optional[Any] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__UpperCAmelCase : int = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
__UpperCAmelCase : List[str] = self.used.shape[0]
__UpperCAmelCase : Dict = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__UpperCAmelCase : str = self.re_embed
__UpperCAmelCase : int = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
__UpperCAmelCase : Dict = n_e
__UpperCAmelCase : str = sane_index_shape
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = inds.shape
assert len(UpperCAmelCase_ ) > 1
__UpperCAmelCase : Optional[int] = inds.reshape(ishape[0] , -1 )
__UpperCAmelCase : List[str] = self.used.to(UpperCAmelCase_ )
__UpperCAmelCase : Dict = (inds[:, :, None] == used[None, None, ...]).long()
__UpperCAmelCase : Union[str, Any] = match.argmax(-1 )
__UpperCAmelCase : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
__UpperCAmelCase : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__UpperCAmelCase : List[str] = self.unknown_index
return new.reshape(UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = inds.shape
assert len(UpperCAmelCase_ ) > 1
__UpperCAmelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
__UpperCAmelCase : Dict = self.used.to(UpperCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
__UpperCAmelCase : List[Any] = 0 # simply set to zero
__UpperCAmelCase : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCAmelCase_ )
return back.reshape(UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
# reshape z -> (batch, height, width, channel) and flatten
__UpperCAmelCase : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
__UpperCAmelCase : List[str] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__UpperCAmelCase : Optional[int] = torch.argmin(torch.cdist(UpperCAmelCase_ , self.embedding.weight ) , dim=1 )
__UpperCAmelCase : Union[str, Any] = self.embedding(UpperCAmelCase_ ).view(z.shape )
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
# compute loss for embedding
if not self.legacy:
__UpperCAmelCase : Any = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__UpperCAmelCase : Any = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__UpperCAmelCase : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
__UpperCAmelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__UpperCAmelCase : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__UpperCAmelCase : Optional[Any] = self.remap_to_used(UpperCAmelCase_ )
__UpperCAmelCase : Any = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__UpperCAmelCase : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__UpperCAmelCase : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
__UpperCAmelCase : Union[str, Any] = self.unmap_to_all(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__UpperCAmelCase : Any = self.embedding(UpperCAmelCase_ )
if shape is not None:
__UpperCAmelCase : Union[str, Any] = z_q.view(UpperCAmelCase_ )
# reshape back to match original input shape
__UpperCAmelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution(object):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]=False ):
"""simple docstring"""
__UpperCAmelCase : str = parameters
__UpperCAmelCase , __UpperCAmelCase : List[str] = torch.chunk(UpperCAmelCase_ , 2 , dim=1 )
__UpperCAmelCase : Optional[int] = torch.clamp(self.logvar , -30.0 , 20.0 )
__UpperCAmelCase : Dict = deterministic
__UpperCAmelCase : List[str] = torch.exp(0.5 * self.logvar )
__UpperCAmelCase : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
__UpperCAmelCase : Union[str, Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[torch.Generator] = None ):
"""simple docstring"""
# make sure sample is on the same device as the parameters and has same dtype
__UpperCAmelCase : List[str] = randn_tensor(
self.mean.shape , generator=UpperCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
__UpperCAmelCase : Dict = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : Any=None ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=[1, 2, 3] ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
__UpperCAmelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCAmelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return self.mean
| 37 |
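The `VectorQuantizer` above does two things worth isolating: nearest-codebook lookup with `torch.cdist`/`argmin`, and the straight-through trick `z + (z_q - z).detach()` that lets gradients flow past the non-differentiable lookup. A minimal sketch with toy sizes, assuming only `torch`:

```python
import torch
import torch.nn as nn

n_codes, dim = 16, 4
embedding = nn.Embedding(n_codes, dim)

z = torch.randn(8, dim, requires_grad=True)  # encoder outputs, flattened
# nearest codebook entry per vector
indices = torch.argmin(torch.cdist(z, embedding.weight), dim=1)
z_q = embedding(indices)

# straight-through estimator: forward pass uses z_q, backward copies grads to z
z_q_st = z + (z_q - z).detach()
z_q_st.sum().backward()
assert z.grad is not None  # gradients reached the encoder side
```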
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(f"{gpu} is broken")
raise
| 37 | 1 |
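The diagnostic above needs NCCL and GPUs; the same init/all-reduce/barrier handshake can be smoke-tested on a single CPU-only machine with the gloo backend. A minimal sketch, not part of the original script:

```python
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def worker(rank, world_size):
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    t = torch.ones(1)
    dist.all_reduce(t, op=dist.ReduceOp.SUM)  # every rank ends with world_size
    dist.barrier()
    print(f"rank {rank}/{world_size}: all_reduce -> {t.item()}")
    dist.destroy_process_group()

if __name__ == "__main__":
    world_size = 2
    mp.spawn(worker, args=(world_size,), nprocs=world_size)
```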
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def snake_case_ ( self):
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
| 100 |
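For reference, the restored config can be exercised through the public transformers API; the small hyperparameters below are arbitrary, chosen only to keep the example fast:

from transformers import RobertaConfig, RobertaModel

config = RobertaConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
model = RobertaModel(config)  # randomly initialized, not pretrained
print(model.config.model_type)  # "roberta"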
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    # member names restored from the diffusers scheduler registry; the obfuscated dump
    # collapsed all fourteen members into one identifier
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 133 | 0 |
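Usage sketch for the mixin above via diffusers' public API; the checkpoint id is illustrative:

from diffusers import DDPMScheduler

scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
print(scheduler.compatibles)  # scheduler classes that can be swapped in on the same config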
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
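The same lazy-import idea can be reproduced without the _LazyModule helper using PEP 562 module-level __getattr__; this is a stripped-down sketch with a made-up package layout, not transformers code:

# lazy_pkg/__init__.py
import importlib
from typing import Any

_import_structure = {"configuration_vit": ["ViTConfig"]}

def __getattr__(name: str) -> Any:
    # resolve the attribute to its submodule and import it only on first access
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")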
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before it is imported below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 53 | 0 |
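The same guarded-import pattern extends naturally to other optional libraries; for example (an addition, not in the original script):

try:
    import accelerate

    print("Accelerate version:", accelerate.__version__)
except ImportError:
    print("Accelerate version:", None)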
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 164 |
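A quick smoke test of the restored config through the public transformers API; the reduced sizes are arbitrary:

from transformers import TableTransformerConfig

config = TableTransformerConfig(num_queries=10, d_model=64, encoder_layers=1, decoder_layers=1)
print(config.hidden_size)          # 64, resolved through the d_model property
print(config.num_attention_heads)  # 8, aliased to encoder_attention_heads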
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 164 | 1 |
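Standalone illustration of the sequence-pair layout the two methods above produce (the token ids are hypothetical, not from a real vocabulary):

cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8, 9], [11, 12]

input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

assert len(input_ids) == len(token_type_ids)  # [CLS] A [SEP] -> segment 0, B [SEP] -> segment 1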
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 138 |
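The snippet is an instance of a generic deprecation-shim pattern; here is a self-contained sketch with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldFeatureExtractor(NewProcessor):
    # kept only for backward compatibility; warns and delegates to the new class
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)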
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each vector in value_array, find the closest vector in dataset by Euclidean distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 | 1 |
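Usage sketch for the functions above, with values chosen so the nearest neighbour is obvious:

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.1, 0.1]])
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 0.1414...]]
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071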
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 24 |
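Quick checks for the two helpers above:

assert is_prime(13) and not is_prime(27)
print(next_prime(14))             # 17: 15 and 16 are composite
print(next_prime(13))             # 17: for a prime input the *next* prime is returned
print(next_prime(14, desc=True))  # 13: "desc" walks downwards instead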
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    # NOTE: the per-layer destination key names below were dropped by the obfuscation;
    # they are reconstructed from the q/k/v split pattern and should be treated as a
    # best-effort restoration of the upstream conversion script.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | 0 |
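Once a checkpoint has been converted and pushed, inference goes through the standard transformers classes; the hub id below assumes the officially published conversion:

import numpy as np
from transformers import XCLIPModel, XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")

video = list(np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8))  # 8 random frames
inputs = processor(text=["playing sports", "eating spaghetti"], videos=video, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_video.softmax(dim=1)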
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 241 |
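The metric is a thin wrapper around NLTK; calling the underlying scorer directly looks like this:

from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat"]]
references = [[["the", "cat", "sat", "down"]]]  # one list of reference translations per hypothesis
print(gleu_score.corpus_gleu(list_of_references=references, hypotheses=hypotheses, min_len=1, max_len=4))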
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # amortized O(1): refill the output stack only when it is empty
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 241 | 1 |
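Usage sketch for the queue above:

q = QueueByTwoStacks([1, 2, 3])
q.put(4)
assert q.get() == 1   # FIFO order despite the LIFO stacks underneath
assert len(q) == 3
print(q)              # Queue((2, 3, 4))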
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 348 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 1 |
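Quick checks for the validator (12345678 % 23 == 14, and position 14 in LOOKUP_LETTERS is "Z"):

assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-Z")   # dashes are stripped before validation
assert not is_spain_national_id("12345678A")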
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = (
    R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    R'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # Question only: fall back to the regular tokenizer call.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Question plus either titles or texts: encode them as a standard pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is duplicated so that every passage gets a copy of it.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Rank passages from most to least relevant.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Shift span indices back from passage-relative to sequence-relative.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        # Score every candidate span no longer than `max_answer_length`.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip spans nested inside (or containing) an already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
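

# Illustrative sanity check (added here for exposition; not part of the original
# module). `_get_best_spans` never reads `self`, so `None` can stand in for it.
# Note that the filter above only drops *nested* spans: the partially
# overlapping span (0, 1) survives alongside the top-scoring (1, 2).
_toy_spans = CustomDPRReaderTokenizerMixin._get_best_spans(
    None,
    start_logits=[0.1, 2.0, 0.3],
    end_logits=[0.2, 1.0, 3.0],
    max_answer_length=2,
    top_spans=2,
)
assert _toy_spans == [(1, 2), (0, 1)]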
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
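

# Hedged end-to-end sketch (added for exposition; not part of the original
# module). It assumes the `facebook/dpr-reader-single-nq-base` checkpoint and
# the `DPRReader` model class from `transformers`, and it downloads weights,
# so it is left as a comment:
#
#     from transformers import DPRReader, DPRReaderTokenizerFast
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(predicted_spans[0].text)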
| 337 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
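

# Quick illustrative check (added for exposition; not in the original file):
# the defaults above encode a ViT-Base-sized backbone with 100 detection tokens.
_example_cfg = YolosConfig()
assert _example_cfg.model_type == "yolos"
assert _example_cfg.hidden_size == 768 and _example_cfg.num_detection_tokens == 100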
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The ONNX export takes a single dynamically-shaped image batch as input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
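

# Hedged usage sketch (added for exposition; not in the original file). It
# assumes `OnnxConfig.__init__` accepts the model config as its first argument,
# as in the `transformers.onnx` package.
_onnx_cfg = YolosOnnxConfig(YolosConfig())
assert list(_onnx_cfg.inputs) == ["pixel_values"]
assert _onnx_cfg.default_onnx_opset == 12 and _onnx_cfg.atol_for_validation == 1e-4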
| 337 | 1 |