def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """
    Return the given decimal number as a simplified (numerator, denominator) pair.
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclidean algorithm: after the loop, `divisor` holds gcd(numerator, denominator)
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
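
# Quick sanity check (added; not part of the original solution): F(12) = 144 is
# the first Fibonacci number with three digits, so the index search returns 12.
assert fibonacci_digits_index(3) == 12
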
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2SeqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
A_ : Any = hex_num[0] == '-'
if is_negative:
A_ : Optional[Any] = hex_num[1:]
try:
A_ : Tuple = int(_UpperCAmelCase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
A_ : Union[str, Any] = ''
while int_num > 0:
A_ : Optional[Any] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
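
# Cross-check (added; not in the original module): Python's built-ins produce
# the same digit string, so they can validate the shift loop above.
assert str(hex_to_bin("AC")) == bin(int("AC", 16))[2:]
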
if __name__ == "__main__":
import doctest
doctest.testmod()
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Given any two of resistance, reactance, and impedance (the unknown one
    passed as 0), return the name and value of the missing quantity.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
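
# Traced example (added; the key name is hypothetical, chosen only to illustrate
# the substitutions above):
#   'model.transformer_0.norm1.weight'
#     -> strip 'model.'                          -> 'transformer_0.norm1.weight'
#     -> 'norm1' -> 'attention.output.LayerNorm' -> 'transformer_0.attention.output.LayerNorm.weight'
#     -> 'transformer_0' -> 'encoder.layer.0'    -> 'encoder.layer.0.attention.output.LayerNorm.weight'
#     -> no 'cls' in the key, so prefix 'yoso.'  -> 'yoso.encoder.layer.0.attention.output.LayerNorm.weight'
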
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
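
# Example invocation (added; file names below are placeholders, not from the
# original script):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json \
#       --pytorch_dump_path ./yoso-hf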
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_load_slow_tokenizer(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_load_their_tokenizer(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bf16_available,
is_bnb_available,
is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fp32,
convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , *__snake_case , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ):
super().__init__(*__snake_case , **__snake_case )
snake_case = eval_examples
snake_case = post_process_function
snake_case = quant_trainer_args
snake_case = 1_2_8 # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
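# A quick verification sketch (not part of the trainer above; the model path is
# hypothetical): after calibrate() and save_onnx(), the exported model can be
# loaded with onnxruntime and run on a dummy batch. The input/output names
# below match the torch.onnx.export call above, and both batch size and
# sequence length are dynamic axes.
#
#     import numpy as np
#     import onnxruntime
#
#     session = onnxruntime.InferenceSession("./model.onnx")
#     dummy_batch = {
#         "input_ids": np.ones((1, 128), dtype=np.int64),
#         "attention_mask": np.ones((1, 128), dtype=np.int64),
#         "token_type_ids": np.zeros((1, 128), dtype=np.int64),
#     }
#     start_logits, end_logits = session.run(
#         ["output_start_logits", "output_end_logits"], dummy_batch
#     )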
| 127
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=[30, 30] , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=None , lowerCAmelCase_=8 , lowerCAmelCase_=10 , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = n_targets
_snake_case = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
_snake_case = (image_size[1] // patch_size) * (image_size[0] // patch_size)
_snake_case = num_patches + 1 + self.num_detection_tokens
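        # Worked example (a sketch) of the two lines above: with the defaults
        # image_size=[30, 30] and patch_size=2, there are (30 // 2) * (30 // 2) = 225
        # patches, so expected_seq_len = 225 + 1 ([CLS]) + 10 detection tokens = 236.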
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_snake_case = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_snake_case = []
for i in range(self.batch_size ):
_snake_case = {}
_snake_case = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=lowerCAmelCase_ )
_snake_case = torch.rand(self.n_targets , 4 , device=lowerCAmelCase_ )
labels.append(lowerCAmelCase_ )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ):
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = YolosModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = YolosForObjectDetection(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(pixel_values=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
_snake_case = model(pixel_values=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
__lowercase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
__lowercase = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_snake_case = []
for i in range(self.model_tester.batch_size ):
_snake_case = {}
_snake_case = torch.ones(
size=(self.model_tester.n_targets,) , device=lowerCAmelCase_ , dtype=torch.long )
_snake_case = torch.ones(
self.model_tester.n_targets , 4 , device=lowerCAmelCase_ , dtype=torch.float )
labels.append(lowerCAmelCase_ )
_snake_case = labels
return inputs_dict
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = YolosModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
# in YOLOS, the seq_len is different
_snake_case = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_snake_case = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = 1
self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# YOLOS has a different seq_length
_snake_case = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = YolosModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(lowerCAmelCase_ )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_snake_case = model(inputs.pixel_values )
# verify outputs
_snake_case = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=lowerCAmelCase_ , )
_snake_case = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
# verify postprocessing
_snake_case = image_processor.post_process_object_detection(
lowerCAmelCase_ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
_snake_case = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(lowerCAmelCase_ )
_snake_case = [75, 75, 17, 63, 17]
_snake_case = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(lowerCAmelCase_ )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , lowerCAmelCase_ , atol=1E-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , lowerCAmelCase_ )
self.assertTrue(torch.allclose(results['boxes'][0, :] , lowerCAmelCase_ ) )
| 160
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 160
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
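# Usage note (a sketch): with the lazy structure above, importing
# `NllbTokenizerFast` from this package only triggers the real import of
# `tokenization_nllb_fast` on first attribute access, keeping module import cheap.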
| 269
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 0
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt: str, class_data_dir: str, num_class_images: int):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
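# Example invocation (a sketch, assuming this script is saved as retrieve.py;
# the prompt and paths are illustrative):
#     python retrieve.py --class_prompt "photo of a cat" \
#         --class_data_dir ./real_reg/cat --num_class_images 200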
| 18
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handle negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handle angles outside the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
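# Worked example (a sketch): Malus's law is I = I0 * cos^2(theta). For
# I0 = 100.0 and theta = 60 degrees, cos(60°) = 0.5, so the transmitted
# intensity is 100.0 * 0.25 = 25.0:
#     >>> round(malus_law(100.0, 60.0), 6)
#     25.0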
| 18
| 1
|
'''simple docstring'''
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self):
        self._trie = {}

    def insert_word(self, text):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string):
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main():
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
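# Example (a sketch): with the words inserted above,
#     autocomplete_using_trie("de")
# returns ('depart ', 'detergent ', 'deer ', 'deal '); each completion keeps
# the trailing space that _elements emits when it reaches the END marker.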
| 174
|
'''simple docstring'''
import types
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


# convert to bit representations and back
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __magic_name__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase="epsilon", lowerCamelCase=None, lowerCamelCase = True, ):
__lowerCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCAmelCase , __lowerCAmelCase = torch.split(lowerCamelCase, sample.shape[1], dim=1)
else:
__lowerCAmelCase = None
# 1. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[t]
__lowerCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCAmelCase = 1 - alpha_prod_t
__lowerCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCAmelCase = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""")
# 3. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(lowerCamelCase, -scale, lowerCamelCase)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCAmelCase = 0
if t > 0:
__lowerCAmelCase = torch.randn(
model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=lowerCamelCase).to(model_output.device)
__lowerCAmelCase = (self._get_variance(lowerCamelCase, predicted_variance=lowerCamelCase) ** 0.5) * noise
__lowerCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase, pred_original_sample=lowerCamelCase)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # The custom step functions above read `self.bit_scale`, so expose it on
        # the scheduler and bind the matching function as its `step` method.
        scheduler.bit_scale = bit_scale
        scheduler.step = types.MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step, scheduler
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
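# Round-trip sanity check (a sketch relying only on the two helpers above):
# decimal_to_bits quantizes each channel to 8 bits, so bits_to_decimal should
# recover the input up to one quantization step of 1/255.
if __name__ == "__main__":
    x = torch.rand(1, 3, 8, 8)         # image tensor in [0, 1]
    bit_repr = decimal_to_bits(x)      # shape (1, 24, 8, 8), values in {-1, +1}
    recovered = bits_to_decimal(bit_repr)
    assert (x - recovered).abs().max() <= 1 / 255 + 1e-6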
| 174
| 1
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 28
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Union[str, Any] = """pixel_values"""
__snake_case : Optional[Any] = False
__snake_case : Dict = TimmBackboneConfig
def __init__( self : List[str] , UpperCAmelCase : int , **UpperCAmelCase : List[str] ):
requires_backends(self , """timm""" )
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCAmelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
lowerCAmelCase_ : List[str] = getattr(UpperCAmelCase , """use_pretrained_backbone""" , UpperCAmelCase )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase_ : str = config.out_indices if getattr(UpperCAmelCase , """out_indices""" , UpperCAmelCase ) is not None else (-1,)
lowerCAmelCase_ : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase_ : Union[str, Any] = self._backbone.return_layers
lowerCAmelCase_ : Dict = {layer["""module"""]: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def A ( cls : Dict , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""config""" , TimmBackboneConfig() )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels )
lowerCAmelCase_ : Tuple = kwargs.pop("""features_only""" , config.features_only )
lowerCAmelCase_ : List[str] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices )
lowerCAmelCase_ : Optional[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase_ : Optional[Any] = self._all_layers
lowerCAmelCase_ : List[Any] = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = self._return_layers
lowerCAmelCase_ : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase_ : Tuple = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = tuple(UpperCAmelCase )
lowerCAmelCase_ : int = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase_ : Optional[Any] = (feature_maps,)
if output_hidden_states:
lowerCAmelCase_ : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
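# A minimal usage sketch (an assumption, not part of the file above): with timm
# installed, the backbone can be driven end to end. "resnet18" is an
# illustrative timm model name, and TimmBackbone/TimmBackboneConfig are the
# upstream names of the classes used here.
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(torch.ones(1, 3, 224, 224)).feature_maps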
| 28
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: Any , UpperCamelCase: List[str]=13 , UpperCamelCase: List[str]=[30, 30] , UpperCamelCase: Union[str, Any]=2 , UpperCamelCase: Any=3 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Any=True , UpperCamelCase: Union[str, Any]=32 , UpperCamelCase: Optional[int]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: int=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Tuple=0.1 , UpperCamelCase: Any=0.1 , UpperCamelCase: Optional[int]=10 , UpperCamelCase: int=0.02 , UpperCamelCase: List[str]=3 , UpperCamelCase: str=None , UpperCamelCase: str=8 , UpperCamelCase: Tuple=10 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = scope
A__ = n_targets
A__ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ = num_patches + 1 + self.num_detection_tokens
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ = []
for i in range(self.batch_size ):
A__ = {}
A__ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase )
A__ = torch.rand(self.n_targets , 4 , device=UpperCamelCase )
labels.append(UpperCamelCase )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCamelCase ( self: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = YolosModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: Dict ):
"""simple docstring"""
A__ = YolosForObjectDetection(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(pixel_values=UpperCamelCase )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ = model(pixel_values=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: Dict=False ):
"""simple docstring"""
A__ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ = []
for i in range(self.model_tester.batch_size ):
A__ = {}
A__ = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase , dtype=torch.long )
A__ = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase , dtype=torch.float )
labels.append(UpperCamelCase )
A__ = labels
return inputs_dict
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = YolosModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Any ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
# in YOLOS, the seq_len is different
A__ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ = len(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase: Optional[int] , UpperCamelCase: Any , UpperCamelCase: Any ):
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.hidden_states
A__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# YOLOS has a different seq_length
A__ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = YolosModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def _snake_case ( ):
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCamelCase )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(inputs.pixel_values )
# verify outputs
A__ = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
        A__ = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=UpperCamelCase, )
        A__ = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify postprocessing
A__ = image_processor.post_process_object_detection(
UpperCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        A__ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(UpperCamelCase )
        A__ = [75, 75, 17, 63, 17]
        A__ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCamelCase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCamelCase , atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCamelCase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCamelCase ) )
| 335
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
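# A minimal usage sketch (the image path is hypothetical; the tool wraps the
# ViLT VQA checkpoint named above and returns the highest-scoring answer label):
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     print(tool(Image.open("cats.png"), "How many cats are there?"))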
| 335
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(
        self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32,
        intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
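# Illustrative value (a sketch) of the `inputs` property above for a default
# (non multiple-choice) task:
#     OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                  ("attention_mask", {0: "batch", 1: "sequence"})])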
| 236
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self ):
__a : List[str] = ort.SessionOptions()
__a : Optional[Any] = False
return options
def _lowerCamelCase ( self ):
__a : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = '''A red cat sitting on a park bench'''
__a : int = np.random.RandomState(0 )
__a : Any = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : Union[str, Any] = output.images
__a : List[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
        __a : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
__a : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__a : Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[str] = '''A red cat sitting on a park bench'''
__a : List[str] = np.random.RandomState(0 )
__a : List[str] = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : Tuple = output.images
__a : List[str] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__a : List[Any] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
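# Note (sketch): the (provider_name, options_dict) tuple returned by gpu_provider
# is onnxruntime's standard way to configure an execution provider; the diffusers
# ONNX pipeline forwards it as
# ort.InferenceSession(..., providers=[("CUDAExecutionProvider", {...})]),
# capping the GPU memory arena at roughly 15GB here.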
| 160
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
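# Usage sketch (assumes a T5 checkpoint is available locally or on the Hub):
#
#   tokenizer = T5TokenizerFast.from_pretrained("t5-small")
#   tokenizer.get_sentinel_tokens()[:2]      # e.g. ['<extra_id_0>', '<extra_id_1>']
#   tokenizer.build_inputs_with_special_tokens([100, 200])  # appends eos: [100, 200, 1]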
| 160
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
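# Quick illustration (not part of the original script) of what _re_checkpoint matches:
#
#   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]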
| 368
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
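# The `% size_divisor == 0` assertions above encode GLPN's resizing contract:
# the processor rounds height and width down to the nearest multiple of
# size_divisor (32 here), whatever the input resolution was.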
| 196
| 0
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
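# For the 4-vertex demo graph above, the expected output is "maximum flow is 6":
# the only augmenting path is 0 -> 1 -> 2 -> 3 and the 1 -> 2 edge (capacity 6)
# is the bottleneck; the 3 -> 0 edge points back out of the sink and carries nothing.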
| 18
|
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively, with memoization via lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
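# Usage sketch: lru_cache memoizes every intermediate result, so a later call
# to factorial(n + 1) reuses the cached factorial(n) and performs only one
# extra multiplication.
#
#   >>> factorial(5)
#   120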
| 18
| 1
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
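# The past-key-values check above encodes the core KV-cache invariant: running
# the full sequence in one pass, or feeding only the new token together with the
# cached past_key_values, must agree on last_hidden_state (atol=1e-3).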
| 359
|
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...schedulers import DDPMScheduler
from ...utils import randn_tensor


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if isinstance(x_in, dict):
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
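# Minimal usage sketch (assumptions: a D4RL-style `env` exposing get_dataset(),
# plus pretrained value-function/UNet/scheduler objects; the names here are
# hypothetical, not part of the original file):
#
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
#   obs, reward, terminal, info = env.step(action)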
| 267
| 0
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def A ( self : Union[str, Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Any ):
"""simple docstring"""
return
@unittest.skip(reason='Cvt does not output attentions' )
def A ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def A ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def A ( self : List[str] ):
"""simple docstring"""
pass
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ):
UpperCamelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = len(self.model_tester.depth )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A ( self : int ):
"""simple docstring"""
pass
@slow
def A ( self : Tuple ):
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = CvtModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : List[str] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 28
|
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """
        Fundamental transformation/operation that is performed on every
        channel value.
        """
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
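# Note: Image.point evaluates `brightness` once per possible channel value
# (0..255) to build a lookup table, so level=100 shifts every pixel up by 100,
# with out-of-range results clipped when converted back to 8-bit.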
| 28
| 1
|
def max_product_subarray(numbers: list[int]) -> int:
    """
    Returns the maximum product obtainable from a contiguous subarray of
    the given integer list `numbers`.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
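if __name__ == "__main__":
    # Worked example (illustrative): in [2, 3, -2, 4] the best run is [2, 3]
    # with product 6; the single -2 breaks the positive streak, which the
    # min/max tracking above handles.
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0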
| 358
|
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
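# The solver above is a direct application of the mass-action law n * p = n_i**2.
# Worked example: with electron_conc = 25 and intrinsic_conc = 10, the missing
# hole concentration is 10**2 / 25 = 4.0.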
| 143
| 0
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(weight)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)

    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
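# Worked example (classic): value=[60, 100, 120], weight=[10, 20, 30], capacity=50.
# Ratios are 6, 5 and 4, so the first two items are taken whole and 20/30 of the
# third: max_value = 60 + 100 + 120 * 20 / 30 = 240, fractions = [1, 1, 2/3].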
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
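# Example of the digit test (Project Euler problem 30): 4150 = 4**5 + 1**5 + 5**5 + 0**5,
# so 4150 is one of the numbers summed by solution().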
if __name__ == "__main__":
print(solution())
| 236
| 0
|
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
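# Example invocation (paths are placeholders, not real files):
#   python conversion_ldm_uncond.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline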
| 178
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
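# Shape conventions handled above: PyTorch stores Linear weights as
# (out_features, in_features) and Conv2d weights as (out, in, kH, kW), while
# Flax expects (in_features, out_features) and (kH, kW, in, out) kernels;
# hence the .T for linear layers and .transpose(2, 3, 1, 0) for 4D conv weights.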
| 178
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 68
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # ds_b - digitsum(b), ds_c is accumulated into c below
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)

            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)

    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
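# Sanity check (sketch): each term adds the digit sum of the previous one, so
# the sequence runs 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ... and solution(10)
# should return 62.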
if __name__ == "__main__":
print(F'''{solution() = }''')
| 196
| 0
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_a : int = '__DUMMY_TRANSFORMERS_USER__'
_a : List[Any] = 'Dummy User'
_a : str = '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'
_a : Optional[Any] = 'https://hub-ci.huggingface.co'
_a : Tuple = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
_a : Any = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
_a : Tuple = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" ,_lowerCamelCase )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Tuple:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" ,_lowerCamelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" ,_lowerCamelCase )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Optional[int]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" ,_lowerCamelCase )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : str ) -> Optional[int]:
HfFolder.save_token(_lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
return HfApi(endpoint=_lowerCamelCase )
import time
from contextlib import contextmanager

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


# CI_HUB_USER and CI_HUB_USER_TOKEN are assumed to be defined earlier in this fixtures module.


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token: str, text_file: str):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token: str, zip_csv_with_dir_path: str):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token: str, zip_image_path: str):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
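# Illustrative usage sketch (not part of the original fixtures; kept as a comment
# so it is not collected as a real test). `load_dataset` and the `use_auth_token`
# argument follow the `datasets` API of this era:
#
#   def test_load_private_txt_repo(hf_private_dataset_repo_txt_data, hf_token):
#       from datasets import load_dataset
#
#       ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
#       assert "train" in ds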
| 359
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=30 , a__=400 , a__=True , a__=None , a__=0.9 , a__=None , a__=True , a__=[0.5, 0.5, 0.5] , a__=[0.5, 0.5, 0.5] , ):
_lowerCAmelCase : int = size if size is not None else {"""shortest_edge""": 30}
_lowerCAmelCase : Dict = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = min_resolution
_lowerCAmelCase : Dict = max_resolution
_lowerCAmelCase : str = do_resize_and_center_crop
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : int = crop_pct
_lowerCAmelCase : int = crop_size
_lowerCAmelCase : Union[str, Any] = do_normalize
_lowerCAmelCase : Tuple = image_mean
_lowerCAmelCase : Optional[Any] = image_std
def __A ( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = PoolFormerImageProcessor if is_vision_available() else None
def __A ( self ):
_lowerCAmelCase : str = PoolFormerImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
self.assertTrue(hasattr(a__ , """crop_pct""" ) )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
self.assertTrue(hasattr(a__ , """image_mean""" ) )
self.assertTrue(hasattr(a__ , """image_std""" ) )
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
_lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : int = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
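# Illustrative usage sketch (not part of the original tests; kept as a comment
# because this module relies on relative test-utility imports and is not meant
# to be run standalone). The checkpoint id is an example:
#
#   from PIL import Image
#   from transformers import PoolFormerImageProcessor
#
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=Image.new("RGB", (256, 256)), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])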
| 126
| 0
|
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists.
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
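# Illustrative usage sketch (not part of the original module): streaming generated
# text to stdout, and consuming text chunks from another thread. Model/tokenizer
# names are examples.
if __name__ == "__main__":
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # example checkpoint
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer(["The quick brown fox"], return_tensors="pt")

    # Blocking variant: prints tokens to stdout as they are produced.
    model.generate(**inputs, streamer=TextStreamer(tokenizer), max_new_tokens=20)

    # Non-blocking variant: generation runs in a thread while we iterate over chunks.
    streamer = TextIteratorStreamer(tokenizer)
    thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
    thread.start()
    for chunk in streamer:
        print(chunk, end="")
    thread.join()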
| 178
|
"""A platform-independent file lock (vendored py-filelock)."""
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
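# Illustrative usage sketch (not part of the original module): serializing access
# to a shared file across processes with the platform-appropriate lock class.
# Paths are examples; on Unix this resolves to `UnixFileLock`.
if __name__ == "__main__":
    lock = FileLock("example.txt.lock", timeout=10)  # `Timeout` is raised after 10s
    with lock:
        # Only one process at a time executes this block; others poll every
        # `poll_intervall` seconds until the lock is released.
        with open("example.txt", "a") as f:
            f.write("exclusive write\n")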
| 267
| 0
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
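# Illustrative example (not part of the original file): solving
#   2x + 3y = 7
#   4x -  y = 5
# determinant = 2*(-1) - 4*3 = -14, so x = -22 / -14 = 11/7 and y = -18 / -14 = 9/7.
if __name__ == "__main__":
    print(cramers_rule_2x2([2, 3, 7], [4, -1, 5]))  # (1.5714285714285714, 1.2857142857142858)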
| 366
|
from ..utils import DummyObject, requires_backends


# Note: the class name was stripped in this copy; `LMSDiscreteScheduler` is the
# torch+scipy-gated object that this dummy module guards in diffusers.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
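# Illustrative note (not part of the original file): these dummies exist so that
# importing the class succeeds even when the optional backends are missing; any
# actual use then fails loudly, e.g.:
#
#   scheduler = LMSDiscreteScheduler()  # raises ImportError via `requires_backends`
#                                       # if torch or scipy is not installed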
| 9
| 0
|
import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it
    is not - fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit the warning with the same message only once per unique call site."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
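# Illustrative usage sketch (not part of the original module): driving the
# verbosity and progress-bar helpers. Note that when this file is executed
# directly, the "library" root logger resolves to the `__main__` namespace.
if __name__ == "__main__":
    set_verbosity_info()
    log = get_logger()
    log.info("effective verbosity: %s", get_verbosity())

    disable_progress_bar()  # `tqdm(...)` now returns the no-op EmptyTqdm wrapper
    for _ in tqdm(range(3)):
        pass
    enable_progress_bar()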
| 5
|
"""Convert DeiT distilled checkpoints from the timm library."""

import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights to our DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
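# Illustrative invocation (not part of the original script; the file name below is
# an assumption about how this script is saved). The timm model name is the
# script's own default:
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224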
| 143
| 0
|
"""AltCLIP model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
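# Illustrative usage sketch (not part of the original module; kept as a comment
# because this file uses relative imports and cannot run standalone):
#
#   from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
#
#   text_config = AltCLIPTextConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
#   vision_config = AltCLIPVisionConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
#   config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "altclip"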
| 355
|
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4_003_660_346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2_734_971_755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1_044_355_234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
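# Illustrative usage sketch (not part of the original tests): the safe pipeline as
# exercised above, with safety guidance enabled; the checkpoint and sld_* values
# mirror the nightly tests:
#
#   from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe
#
#   pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
#   image = pipe(
#       "portrait photograph", sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025
#   ).images[0]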
| 349
| 0
|
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
with self.assertRaises(a ):
snake_case_ = text_generator('test' , return_text=a , return_tensors=a )
        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which always appends EOS, so it
        # works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
snake_case_ = text_generator('' )
self.assertEqual(a , [{'generated_text': ANY(a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
snake_case_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_00 , max_new_tokens=20 )
snake_case_ = text_generator('This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a ):
text_generator(
'This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCamelCase ( self ) -> Union[str, Any]:
import torch
# Classic `model_kwargs`
snake_case_ = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
snake_case_ = pipe('This is a test' )
self.assertEqual(
a , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else).
        snake_case_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
snake_case_ = pipe('This is a test' )
self.assertEqual(
a , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
snake_case_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
snake_case_ = pipe('This is a test' )
self.assertEqual(
a , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def _UpperCamelCase ( self ) -> str:
import torch
        snake_case_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCamelCase ( self ) -> Union[str, Any]:
import torch
        snake_case_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=a , top_p=0.5 )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = 'Hello world'
snake_case_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
snake_case_ = logging.get_logger('transformers.generation.tf_utils' )
else:
snake_case_ = logging.get_logger('transformers.generation.utils' )
        snake_case_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a ) as cl:
snake_case_ = text_generator(a , max_length=10 , max_new_tokens=1 )
self.assertIn(a , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a ) as cl:
snake_case_ = text_generator(a , max_new_tokens=1 )
self.assertNotIn(a , cl.out )
with CaptureLogger(a ) as cl:
snake_case_ = text_generator(a , max_length=10 )
self.assertNotIn(a , cl.out )
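A minimal end-to-end sketch of the behaviour these tests exercise; the gpt2 checkpoint is illustrative, while handle_long_generation="hole" and max_new_tokens are the real pipeline arguments used above.

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
# "hole" truncates the prompt from the left so that prompt + max_new_tokens
# still fits inside the model's maximum context length.
outputs = generator(
    "some very long prompt " * 500,
    handle_long_generation="hole",
    max_new_tokens=20,
)
print(outputs[0]["generated_text"])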
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''BlipImageProcessor'''
lowerCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , a , a ) -> Tuple:
snake_case_ = False
super().__init__(a , a )
snake_case_ = self.image_processor
def __call__( self , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = False , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
snake_case_ = self.tokenizer
snake_case_ = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
# add pixel_values
snake_case_ = self.image_processor(a , return_tensors=a )
if text is not None:
snake_case_ = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
else:
snake_case_ = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def _UpperCamelCase ( self , *a , **a ) -> int:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCamelCase ( self , *a , **a ) -> Any:
return self.tokenizer.decode(*a , **a )
@property
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
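A short usage sketch of the processor above: a text-only call falls through to the tokenizer, while image+text merges pixel_values with the tokenizer's encoding into one BatchEncoding. The checkpoint name is illustrative.

from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(inputs.keys())  # pixel_values plus the tokenizer outputs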
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
SCREAMING_SNAKE_CASE_: List[str] =get_logger()
SCREAMING_SNAKE_CASE_: Optional[dict] =None
class __A ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
def __init__(self : List[Any] , __a : Optional[int]=None , __a : Any=None , **__a : Dict ):
super().__init__(features=__a )
import jax
from jaxlib.xla_client import Device
if isinstance(__a , __a ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__a )}, as `jaxlib.xla_extension.Device` """
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
UpperCAmelCase_ = device if isinstance(__a , __a ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`, so we keep a global mapping instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
UpperCAmelCase_ = str(jax.devices()[0] )
UpperCAmelCase_ = jnp_array_kwargs
@staticmethod
def _lowercase ():
import jax
return {str(__a ): device for device in jax.devices()}
def _lowercase (self : str , __a : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__a , __a ) and column:
if all(
isinstance(__a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__a , axis=0 )
return column
def _lowercase (self : Any , __a : Optional[int] ):
import jax
import jax.numpy as jnp
if isinstance(__a , (str, bytes, type(__a )) ):
return value
elif isinstance(__a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase_ = {}
if isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                UpperCAmelCase_ = {"dtype": jnp.int64}
            else:
                UpperCAmelCase_ = {"dtype": jnp.int32}
elif isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCAmelCase_ = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__a , PIL.Image.Image ):
UpperCAmelCase_ = np.asarray(__a )
        # using a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`, so we keep a global mapping instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__a , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase (self : int , __a : Any ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__a , "__array__" ) and not isinstance(__a , jax.Array ):
UpperCAmelCase_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__a , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(__a , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(__a )
def _lowercase (self : Union[str, Any] , __a : dict ):
return map_nested(self._recursive_tensorize , __a , map_list=__a )
def _lowercase (self : str , __a : pa.Table ):
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_row(__a )
UpperCAmelCase_ = self.python_features_decoder.decode_row(__a )
return self.recursive_tensorize(__a )
def _lowercase (self : Tuple , __a : pa.Table ):
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_column(__a )
UpperCAmelCase_ = self.python_features_decoder.decode_column(__a , pa_table.column_names[0] )
UpperCAmelCase_ = self.recursive_tensorize(__a )
UpperCAmelCase_ = self._consolidate(__a )
return column
def _lowercase (self : str , __a : pa.Table ):
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_batch(__a )
UpperCAmelCase_ = self.python_features_decoder.decode_batch(__a )
UpperCAmelCase_ = self.recursive_tensorize(__a )
for column_name in batch:
UpperCAmelCase_ = self._consolidate(batch[column_name] )
return batch
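In practice this formatter is reached through the datasets formatting API rather than instantiated directly; a small sketch (column values illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
ds = ds.with_format("jax")  # a device string can also be passed through
print(type(ds[0]["x"]))  # a jax array produced by the formatter above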
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    """Project Euler 135: count how many n below the limit admit exactly ten
    solutions of x^2 - y^2 - z^2 = n with x, y, z in arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # a + n/a equals 4d, so it must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # x, y, z positive integers: z > 0 gives a > d; n > 0 gives a < 4d
                    frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"{solution() = }")
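The loop above rests on a short identity: writing the arithmetic progression as x = a + d, y = a, z = a - d gives x**2 - y**2 - z**2 = 4ad - a**2 = a(4d - a). So n = a(4d - a) forces a to divide n, with 4d = a + n/a, which is exactly the `common_difference` computed before dividing by 4. A quick sanity check of that identity:

for a in range(1, 20):
    for d in range(1, a):  # z > 0 requires a > d
        x, y, z = a + d, a, a - d
        assert x * x - y * y - z * z == a * (4 * d - a)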
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_A : List[Any] ={
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
_A : Optional[int] ='''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
_A : Dict ='''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
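Illustrative scoring with the functions above: scores range from 0 to 12, and text with English-like letter frequencies tends to score higher.

print(english_freq_match_score("Hello there, this looks like normal English text."))
print(english_freq_match_score("zxqj kvw zzzz qqqq jjjj xxxx"))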
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase = """true"""
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : int=8_2 , snake_case_ : Optional[Any]=1_6 ) ->Dict:
set_seed(4_2 )
lowerCamelCase__ : List[Any] =RegressionModel()
lowerCamelCase__ : List[Any] =deepcopy(snake_case_ )
lowerCamelCase__ : List[str] =RegressionDataset(length=snake_case_ )
lowerCamelCase__ : Any =DataLoader(snake_case_ , batch_size=snake_case_ )
model.to(accelerator.device )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.prepare(snake_case_ , snake_case_ )
return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : str=False ) ->List[str]:
lowerCamelCase__ : int =AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowerCamelCase__ : List[Any] =load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case_ : Optional[Any] ):
lowerCamelCase__ : Optional[int] =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
with accelerator.main_process_first():
lowerCamelCase__ : Tuple =dataset.map(
snake_case_ , batched=snake_case_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowerCamelCase__ : List[Any] =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case_ : Union[str, Any] ):
if use_longest:
return tokenizer.pad(snake_case_ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case_ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return DataLoader(snake_case_ , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=1_6 )
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) ->Any:
lowerCamelCase__ : Optional[int] =Accelerator(dispatch_batches=snake_case_ , split_batches=snake_case_ )
lowerCamelCase__ : List[Any] =get_dataloader(snake_case_ , not dispatch_batches )
lowerCamelCase__ : Union[str, Any] =AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Dict =accelerator.prepare(snake_case_ , snake_case_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[str] ) ->Dict:
lowerCamelCase__ : Optional[Any] =[]
for batch in dataloader:
lowerCamelCase__ , lowerCamelCase__ : int =batch.values()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =[], []
for logit, targ in logits_and_targets:
logits.append(snake_case_ )
targs.append(snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =torch.cat(snake_case_ ), torch.cat(snake_case_ )
return logits, targs
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : Optional[int]=8_2 , snake_case_ : Any=False , snake_case_ : List[Any]=False , snake_case_ : Optional[int]=1_6 ) ->List[str]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =get_basic_setup(snake_case_ , snake_case_ , snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Any =generate_predictions(snake_case_ , snake_case_ , snake_case_ )
assert (
len(snake_case_ ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case_ )}"""
def lowerCAmelCase_ ( snake_case_ : bool = False , snake_case_ : bool = False ) ->str:
lowerCamelCase__ : Dict =evaluate.load('glue' , 'mrpc' )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =get_mrpc_setup(snake_case_ , snake_case_ )
# First do baseline
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =setup['no']
model.to(snake_case_ )
model.eval()
for batch in dataloader:
batch.to(snake_case_ )
with torch.inference_mode():
lowerCamelCase__ : Any =model(**snake_case_ )
lowerCamelCase__ : List[str] =outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case_ , references=batch['labels'] )
lowerCamelCase__ : Optional[Any] =metric.compute()
# Then do distributed
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCamelCase__ : List[Any] =model(**snake_case_ )
lowerCamelCase__ : str =outputs.logits.argmax(dim=-1 )
lowerCamelCase__ : int =batch['labels']
lowerCamelCase__ , lowerCamelCase__ : List[Any] =accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case_ , references=snake_case_ )
lowerCamelCase__ : List[str] =metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def lowerCAmelCase_ ( ) ->str:
lowerCamelCase__ : List[str] =Accelerator(split_batches=snake_case_ , dispatch_batches=snake_case_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case_ , snake_case_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCamelCase__ : Dict =Accelerator(split_batches=snake_case_ , dispatch_batches=snake_case_ )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case_ , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowerCamelCase__ : List[Any] =Accelerator()
test_torch_metrics(snake_case_ , 5_1_2 )
accelerator.state._reset_state()
def lowerCAmelCase_ ( snake_case_ : List[Any] ) ->Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
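The core pattern these tests exercise, reduced to a sketch: gather_for_metrics concatenates tensors from all processes and trims the duplicate samples that padding of the last batch would otherwise introduce.

import torch
from accelerate import Accelerator

accelerator = Accelerator()
local_preds = torch.arange(4, device=accelerator.device)
all_preds = accelerator.gather_for_metrics(local_preds)
print(all_preds.shape)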
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict =logging.get_logger(__name__)
# TODO Update this
A__ : Tuple ={
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: List[str] = '''esm'''
def __init__( self : List[Any] , __snake_case : Dict=None , __snake_case : Tuple=None , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=7_68 , __snake_case : Dict=12 , __snake_case : List[str]=12 , __snake_case : Any=30_72 , __snake_case : Any=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Any=10_26 , __snake_case : str=0.02 , __snake_case : Dict=1E-1_2 , __snake_case : Union[str, Any]="absolute" , __snake_case : Optional[int]=True , __snake_case : Dict=None , __snake_case : Dict=False , __snake_case : Union[str, Any]=False , __snake_case : Dict=None , __snake_case : Any=None , **__snake_case : List[str] , ) -> str:
super().__init__(pad_token_id=__snake_case , mask_token_id=__snake_case , **__snake_case )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = use_cache
_lowerCAmelCase = emb_layer_norm_before
_lowerCAmelCase = token_dropout
_lowerCAmelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
_lowerCAmelCase = EsmFoldConfig()
elif isinstance(__snake_case , __snake_case ):
_lowerCAmelCase = EsmFoldConfig(**__snake_case )
_lowerCAmelCase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
_lowerCAmelCase = get_default_vocab_list()
else:
_lowerCAmelCase = vocab_list
else:
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , __snake_case ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def lowercase__ ( self : Tuple ) -> List[str]:
_lowerCAmelCase = super().to_dict()
if isinstance(self.esmfold_config , __snake_case ):
_lowerCAmelCase = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase :
_lowercase: str = None
_lowercase: bool = True
_lowercase: bool = False
_lowercase: bool = False
_lowercase: bool = False
_lowercase: float = 0
_lowercase: bool = True
_lowercase: bool = False
_lowercase: int = 128
_lowercase: "TrunkConfig" = None
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
if self.trunk is None:
_lowerCAmelCase = TrunkConfig()
elif isinstance(self.trunk , __snake_case ):
_lowerCAmelCase = TrunkConfig(**self.trunk )
def lowercase__ ( self : str ) -> Union[str, Any]:
_lowerCAmelCase = asdict(self )
_lowerCAmelCase = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase :
_lowercase: int = 48
_lowercase: int = 1024
_lowercase: int = 128
_lowercase: int = 32
_lowercase: int = 32
_lowercase: int = 32
_lowercase: float = 0
_lowercase: float = 0
_lowercase: bool = False
_lowercase: int = 4
_lowercase: Optional[int] = 128
_lowercase: "StructureModuleConfig" = None
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
if self.structure_module is None:
_lowerCAmelCase = StructureModuleConfig()
elif isinstance(self.structure_module , __snake_case ):
_lowerCAmelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
_lowerCAmelCase = self.sequence_state_dim // self.sequence_head_width
_lowerCAmelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )
def lowercase__ ( self : Optional[int] ) -> List[str]:
_lowerCAmelCase = asdict(self )
_lowerCAmelCase = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase :
_lowercase: int = 384
_lowercase: int = 128
_lowercase: int = 16
_lowercase: int = 128
_lowercase: int = 12
_lowercase: int = 4
_lowercase: int = 8
_lowercase: float = 0.1
_lowercase: int = 8
_lowercase: int = 1
_lowercase: int = 2
_lowercase: int = 7
_lowercase: int = 10
_lowercase: float = 1E-8
_lowercase: float = 1E5
def lowercase__ ( self : List[str] ) -> Optional[int]:
return asdict(self )
def UpperCamelCase__ ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
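A tiny illustration of the TrunkConfig validation above; the field names come from the checks themselves, and the default values are assumed to satisfy them.

trunk = TrunkConfig()
assert trunk.sequence_state_dim % trunk.sequence_head_width == 0
assert trunk.dropout < 0.4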
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
A__ : int ={'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict =['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
A__ : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
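With this lazy registration, importing the package stays cheap: the tokenizer module is only loaded when its attribute is first accessed. A sketch, assuming the standard transformers package layout:

import transformers.models.herbert as herbert

tok_cls = herbert.HerbertTokenizer  # triggers the deferred module import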
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase__ ( unittest.TestCase ):
def __init__( self : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Dict=7 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Dict=99 ,lowerCamelCase__ : int=32 ,lowerCamelCase__ : Tuple=5 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : Any=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=512 ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : int=4 ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Union[str, Any] = seq_length
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : Optional[int] = use_attention_mask
_UpperCamelCase : Any = use_token_type_ids
_UpperCamelCase : str = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Any = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : Optional[int] = type_vocab_size
_UpperCamelCase : str = type_sequence_label_size
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : List[Any] = num_choices
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=lowerCamelCase__ ,)
return config, input_ids, attention_mask
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = config_and_inputs
_UpperCamelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase : Dict = model_class_name.from_pretrained('distilbert-base-uncased' )
_UpperCamelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
_UpperCamelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCamelCase : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase : Dict = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ )[0]
_UpperCamelCase : Any = (1, 11, 768)
self.assertEqual(output.shape ,lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,lowerCamelCase__ ,atol=1E-4 ) )
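A standalone version of the slow integration check above; the distilbert-base-uncased checkpoint is real, and the hidden size of 768 matches the expected shape in the test.

import numpy as np
from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
output = model(np.array([[0, 345, 232, 328, 2]]))[0]
print(output.shape)  # (1, 5, 768)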
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = '''ylacombe/bark-small'''
__SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = '''en_speaker_1'''
__SCREAMING_SNAKE_CASE : Any = '''This is a test string'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings_path.json'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings'''
def __magic_name__( self :List[str] , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> int:
shutil.rmtree(self.tmpdirname )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__SCREAMING_SNAKE_CASE : str = 35
__SCREAMING_SNAKE_CASE : str = 2
__SCREAMING_SNAKE_CASE : List[Any] = 8
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''semantic_prompt''': np.ones(lowerCAmelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__( self :Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
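The voice preset consumed above is just a dictionary of three numpy arrays; the shapes below mirror the test's seq_len=35, nb_codebooks_coarse=2 and nb_codebooks_total=8.

import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}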
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__a = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
with open(UpperCAmelCase__ , '''rb''' ) as f:
UpperCAmelCase_ : Union[str, Any] = Image.open(UpperCAmelCase__ )
return im.convert('''RGB''' )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default=_snake_case , metadata={
'''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
} , )
lowerCAmelCase = field(
default=_snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase = field(default=_snake_case , metadata={'''help''': '''A folder containing the training data.'''} )
lowerCAmelCase = field(default=_snake_case , metadata={'''help''': '''A folder containing the validation data.'''} )
lowerCAmelCase = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowerCAmelCase = field(
default=_snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase = field(
default=_snake_case , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def a__ ( self ) -> List[str]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
lowerCAmelCase = field(
default=_snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_snake_case )} , )
lowerCAmelCase = field(
default=_snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCAmelCase = field(
default=_snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
lowerCAmelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCAmelCase = field(default=_snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowerCAmelCase = field(
default=_snake_case , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCAmelCase = field(
default=_snake_case , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.stack([example['''pixel_values'''] for example in examples] )
UpperCAmelCase_ : Any = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''' , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
        # The default of training_args.log_level is passive, so we set the log level to info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase_ : str = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase__ )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase_ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
UpperCAmelCase_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
UpperCAmelCase_ : Any = {}
if data_args.train_dir is not None:
UpperCAmelCase_ : List[Any] = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
UpperCAmelCase_ : Optional[Any] = os.path.join(data_args.validation_dir , '''**''' )
UpperCAmelCase_ : Tuple = load_dataset(
'''imagefolder''' , data_files=UpperCAmelCase__ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase_ : List[str] = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
UpperCAmelCase_ : Optional[Any] = dataset['''train'''].train_test_split(data_args.train_val_split )
UpperCAmelCase_ : Optional[Any] = split['''train''']
UpperCAmelCase_ : Dict = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase_ : Union[str, Any] = dataset['''train'''].features['''labels'''].names
UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = {}, {}
for i, label in enumerate(UpperCAmelCase__ ):
UpperCAmelCase_ : Dict = str(UpperCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = label
# Load the accuracy metric from the datasets package
UpperCAmelCase_ : Optional[Any] = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(_lowercase ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCAmelCase__ ) , labelaid=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase_ : Optional[int] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
UpperCAmelCase_ : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
UpperCAmelCase_ : Union[str, Any] = image_processor.size['''shortest_edge''']
else:
UpperCAmelCase_ : Any = (image_processor.size['''height'''], image_processor.size['''width'''])
UpperCAmelCase_ : Tuple = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
UpperCAmelCase_ : List[str] = Compose(
[
RandomResizedCrop(UpperCAmelCase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
UpperCAmelCase_ : List[str] = Compose(
[
Resize(UpperCAmelCase__ ),
CenterCrop(UpperCAmelCase__ ),
ToTensor(),
normalize,
] )
def train_transforms(_lowercase ):
UpperCAmelCase_ : List[str] = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(_lowercase ):
UpperCAmelCase_ : int = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
UpperCAmelCase_ : Optional[int] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(UpperCAmelCase__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
UpperCAmelCase_ : int = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(UpperCAmelCase__ )
    # Initialize our trainer
UpperCAmelCase_ : List[str] = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
UpperCAmelCase_ : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ : Tuple = last_checkpoint
UpperCAmelCase_ : List[str] = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase_ : Optional[int] = trainer.evaluate()
trainer.log_metrics('''eval''' , UpperCAmelCase__ )
trainer.save_metrics('''eval''' , UpperCAmelCase__ )
# Write model card and (optionally) push to hub
UpperCAmelCase_ : Any = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase__ )
else:
trainer.create_model_card(**UpperCAmelCase__ )
if __name__ == "__main__":
main()
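The script is driven entirely by HfArgumentParser flags; a typical launch looks like the following (paths, model id and flag selection are illustrative, not prescribed by the script):

# python run_image_classification.py \
#     --model_name_or_path google/vit-base-patch16-224-in21k \
#     --train_dir ./images/train \
#     --validation_dir ./images/val \
#     --output_dir ./vit-finetuned \
#     --do_train \
#     --do_eval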
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if isinstance(_lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_lowercase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_lowercase ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = ['''pixel_values''']
def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 1 / 255 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
UpperCAmelCase_ : str = do_resize
UpperCAmelCase_ : Union[str, Any] = size
UpperCAmelCase_ : int = do_center_crop
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Optional[int] = resample
UpperCAmelCase_ : List[Any] = do_rescale
UpperCAmelCase_ : Tuple = rescale_factor
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" in size:
UpperCAmelCase_ : Dict = get_resize_output_image_size(_SCREAMING_SNAKE_CASE ,size['''shortest_edge'''] ,default_to_square=_SCREAMING_SNAKE_CASE )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Tuple = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : str = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Dict:
return rescale(_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Any = to_numpy_array(_SCREAMING_SNAKE_CASE )
if do_resize:
UpperCAmelCase_ : Union[str, Any] = self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE )
if do_center_crop:
UpperCAmelCase_ : Optional[int] = self.center_crop(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE )
if do_rescale:
UpperCAmelCase_ : str = self.rescale(image=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE )
if do_normalize:
UpperCAmelCase_ : List[Any] = self.normalize(image=_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return image
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image:
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : int = resample if resample is not None else self.resample
UpperCAmelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase_ : List[Any] = make_batched(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = [
[
self._preprocess_image(
image=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,do_center_crop=_SCREAMING_SNAKE_CASE ,crop_size=_SCREAMING_SNAKE_CASE ,do_rescale=_SCREAMING_SNAKE_CASE ,rescale_factor=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,image_mean=_SCREAMING_SNAKE_CASE ,image_std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : Any = {'''pixel_values''': videos}
return BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE )
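

# Editorial sketch (not in the original file): assuming this class mirrors
# transformers' VideoMAEImageProcessor, as its structure suggests, typical use is:
if __name__ == "__main__":
    from transformers import VideoMAEImageProcessor

    video = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(16)]
    processor = VideoMAEImageProcessor()
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (num_videos, num_frames, C, H, W), e.g. (1, 16, 3, 224, 224)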
| 235
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
print(f"""Found {torch.cuda.device_count()} devices.""" )
__a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
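

# Editorial sketch (not in the original file): `debug_launcher`, used by the
# @require_cpu tests above, forks N CPU processes around a plain callable.
def _demo_distributed_fn():
    from accelerate import Accelerator

    accelerator = Accelerator()
    print(f"rank {accelerator.process_index} / {accelerator.num_processes}")


if __name__ == "__main__":
    debug_launcher(_demo_distributed_fn, num_processes=2)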
| 27
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = 1
__UpperCamelCase = 3
__UpperCamelCase = (3_2, 3_2)
__UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase )
return image
@property
def __lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
return model
@property
def __lowerCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowercase )
@property
def __lowerCamelCase ( self ) -> Tuple:
def extract(*lowercase , **lowercase ):
class UpperCAmelCase__ :
def __init__( self ) -> Tuple:
__UpperCamelCase = torch.ones([0] )
def __lowerCamelCase ( self , lowercase ) -> List[str]:
self.pixel_values.to(lowercase )
return self
return Out()
return extract
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase )
assert isinstance(lowercase , lowercase )
assert isinstance(pipe.scheduler , lowercase )
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _snake_case ( a__ ):
snake_case__ = "bert-generation"
def __init__( self : Optional[int] , UpperCAmelCase : Dict=50358 , UpperCAmelCase : int=1024 , UpperCAmelCase : Optional[int]=24 , UpperCAmelCase : str=16 , UpperCAmelCase : str=4096 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : int=1E-12 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : int=2 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Union[str, Any]="absolute" , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Optional[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : int = hidden_act
__lowerCamelCase : List[str] = intermediate_size
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : List[str] = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Union[str, Any] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : Optional[Any] = use_cache
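

# Editorial sketch (not in the original file): the config above plugs into the
# usual from-config construction; BertGenerationEncoder is the matching model
# class in transformers.
if __name__ == "__main__":
    from transformers import BertGenerationConfig, BertGenerationEncoder

    config = BertGenerationConfig(
        hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=512
    )
    model = BertGenerationEncoder(config)
    print(sum(p.numel() for p in model.parameters()))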
| 356
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Dict=False ) -> Any:
'''simple docstring'''
__lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCamelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: int , _lowerCamelCase: List[str]=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCamelCase : Any = ""
else:
__lowerCamelCase : Optional[int] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase : Optional[int] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
__lowerCamelCase : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
__lowerCamelCase : str = in_proj_bias[: config.hidden_size]
__lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase : str = in_proj_bias[-config.hidden_size :]
def lowercase_ ( _lowerCamelCase: int ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Tuple = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : List[Any] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: List[str] , _lowerCamelCase: Optional[int] ) -> Any:
'''simple docstring'''
__lowerCamelCase : str = dct.pop(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = val
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Tuple ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : int = ViTMSNConfig()
__lowerCamelCase : Dict = 1000
__lowerCamelCase : str = "datasets/huggingface/label-files"
__lowerCamelCase : Optional[int] = "imagenet-1k-id2label.json"
__lowerCamelCase : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase ) , "r" ) )
__lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCamelCase : int = idalabel
__lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__lowerCamelCase : int = 384
__lowerCamelCase : Optional[int] = 1536
__lowerCamelCase : str = 6
elif "l16" in checkpoint_url:
__lowerCamelCase : Optional[Any] = 1024
__lowerCamelCase : str = 4096
__lowerCamelCase : Any = 24
__lowerCamelCase : Optional[int] = 16
__lowerCamelCase : Union[str, Any] = 0.1
elif "b4" in checkpoint_url:
__lowerCamelCase : Optional[Any] = 4
elif "l7" in checkpoint_url:
__lowerCamelCase : str = 7
__lowerCamelCase : int = 1024
__lowerCamelCase : int = 4096
__lowerCamelCase : Union[str, Any] = 24
__lowerCamelCase : Optional[int] = 16
__lowerCamelCase : List[Any] = 0.1
__lowerCamelCase : str = ViTMSNModel(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["target_encoder"]
__lowerCamelCase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowerCamelCase )
__lowerCamelCase : Tuple = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , base_model=_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__lowerCamelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCamelCase : Tuple = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
__lowerCamelCase : List[str] = ViTImageProcessor(
size=config.image_size , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__lowerCamelCase : Tuple = image_processor(images=_lowerCamelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
__lowerCamelCase : Optional[int] = model(**_lowerCamelCase )
__lowerCamelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__lowerCamelCase : Any = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
__lowerCamelCase : Optional[Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
__lowerCamelCase : List[str] = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
__lowerCamelCase : str = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
__lowerCamelCase : Optional[int] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _lowerCamelCase , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
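

# Editorial sketch (not in the original file) of the fused-qkv split performed
# in read_in_q_k_v above: timm-style checkpoints store one (3*hidden, hidden)
# matrix, which is sliced into query/key/value blocks of hidden rows each.
def _demo_qkv_split(hidden: int = 4) -> None:
    fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = fused[:hidden, :]
    k = fused[hidden : hidden * 2, :]
    v = fused[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)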
| 64
| 0
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = "t5"
lowercase__ = ["past_key_values"]
lowercase__ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Dict ,lowercase_ : List[Any]=3_2_1_2_8 ,lowercase_ : List[str]=5_1_2 ,lowercase_ : List[str]=6_4 ,lowercase_ : str=2_0_4_8 ,lowercase_ : Any=6 ,lowercase_ : Any=None ,lowercase_ : Any=8 ,lowercase_ : List[Any]=3_2 ,lowercase_ : Dict=1_2_8 ,lowercase_ : List[Any]=0.1 ,lowercase_ : Any=1E-6 ,lowercase_ : Any=1.0 ,lowercase_ : List[Any]="relu" ,lowercase_ : List[str]=True ,lowercase_ : List[Any]=True ,lowercase_ : Union[str, Any]=0 ,lowercase_ : Tuple=1 ,**lowercase_ : Optional[int] ,):
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Union[str, Any] = d_model
lowerCAmelCase__ : int = d_kv
lowerCAmelCase__ : str = d_ff
lowerCAmelCase__ : Optional[Any] = num_layers
lowerCAmelCase__ : Any = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ : Dict = num_heads
lowerCAmelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCAmelCase__ : Optional[Any] = relative_attention_max_distance
lowerCAmelCase__ : Any = dropout_rate
lowerCAmelCase__ : Optional[Any] = layer_norm_epsilon
lowerCAmelCase__ : Dict = initializer_factor
lowerCAmelCase__ : str = feed_forward_proj
lowerCAmelCase__ : List[Any] = use_cache
lowerCAmelCase__ : Optional[Any] = self.feed_forward_proj.split('''-''' )
lowerCAmelCase__ : Tuple = act_info[-1]
lowerCAmelCase__ : List[Any] = act_info[0] == '''gated'''
if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ : Optional[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowercase_ ,eos_token_id=lowercase_ ,is_encoder_decoder=lowercase_ ,**lowercase_ ,)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : List[Any] = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
lowerCAmelCase__ : Tuple = '''past_encoder_sequence + sequence'''
lowerCAmelCase__ : List[Any] = {0: '''batch'''}
lowerCAmelCase__ : List[str] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase__ : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ ,direction='''inputs''' )
return common_inputs
@property
def __lowerCAmelCase ( self : Any ):
return 1_3
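

# Editorial sketch (not in the original file): the feed_forward_proj parsing
# above splits "gated-gelu" into a gated flag plus the activation name, then
# remaps it to "gelu_new" for backwards compatibility.
if __name__ == "__main__":
    from transformers import T5Config

    cfg = T5Config(feed_forward_proj="gated-gelu")
    print(cfg.dense_act_fn, cfg.is_gated_act)  # gelu_new True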
| 106
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Optional[Any] ,lowercase_ : int ,lowercase_ : str ):
lowerCAmelCase__ : Optional[int] = hf_hub_download(
repo_id='''nateraw/video-demo''' ,filename='''archery.mp4''' ,repo_type='''dataset''' )
lowerCAmelCase__ : Tuple = VideoClassificationPipeline(model=lowercase_ ,image_processor=lowercase_ ,top_k=2 )
lowerCAmelCase__ : Optional[int] = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def __lowerCAmelCase ( self : str ,lowercase_ : int ,lowercase_ : Dict ):
for example in examples:
lowerCAmelCase__ : Dict = video_classifier(lowercase_ )
self.assertEqual(
lowercase_ ,[
{'''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ )},
{'''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ )},
] ,)
@require_torch
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : Optional[int] = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase__ : List[str] = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 1_0} ,crop_size={'''height''': 1_0, '''width''': 1_0} )
lowerCAmelCase__ : Optional[Any] = pipeline(
'''video-classification''' ,model=lowercase_ ,feature_extractor=lowercase_ ,frame_sampling_rate=4 )
lowerCAmelCase__ : Optional[int] = hf_hub_download(repo_id='''nateraw/video-demo''' ,filename='''archery.mp4''' ,repo_type='''dataset''' )
lowerCAmelCase__ : Optional[int] = video_classifier(lowercase_ ,top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ ,decimals=4 ) ,[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] ,)
lowerCAmelCase__ : Dict = video_classifier(
[
video_file_path,
video_file_path,
] ,top_k=2 ,)
self.assertEqual(
nested_simplify(lowercase_ ,decimals=4 ) ,[
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] ,)
@require_tf
def __lowerCAmelCase ( self : int ):
pass
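

# Editorial sketch (not in the original file): outside the test harness the
# pipeline is built in one call; the checkpoint name and file path below are
# assumed examples, not values from this test.
if __name__ == "__main__":
    classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
    print(classifier("archery.mp4", top_k=3))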
| 106
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__snake_case = TypeVar('''T''')
class SegmentTree(Generic[T]):
    '''Iterative (non-recursive) segment tree over an arbitrary associative function.'''

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        # Leaves live at indices N..2N-1; internal nodes at 1..N-1.
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        # Fill each internal node from its two children, bottom-up.
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        # Overwrite leaf p, then refresh the ancestors on the path to the root.
        p += self.N
        self.st[p] = v

        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        # Inclusive query over arr[l..r]: fold in stray leaves while the two
        # index pointers climb toward each other, one tree level per iteration.
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Check every (i, j) range against a brute-force reduce over the slice."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
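
    # Editorial sketch (not in the original file): any associative merge
    # function works, e.g. gcd (math.gcd treats its arguments by absolute value).
    from math import gcd

    gcd_segment_tree = SegmentTree(test_array, gcd)
    print(gcd_segment_tree.query(0, len(test_array) - 1))  # gcd of the whole array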
| 352
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
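
    # Editorial sketch (not in the original file): the bitwise test agrees with
    # the modulo test even for negatives, since bit 0 carries parity in two's complement.
    for n in (-4, -3, 0, 1, 2, 7):
        assert (n & 1 == 0) == (n % 2 == 0)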
| 153
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
lowercase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
lowercase = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(_lowerCamelCase ) , torch_builtin(_lowerCamelCase ) ) )
self.assertFalse(torch.allclose(gelu_python(_lowerCamelCase ) , gelu_new(_lowerCamelCase ) ) )
def UpperCamelCase_ ( self ):
lowercase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
lowercase = get_activation('gelu' )
lowercase = get_activation('gelu_10' )
lowercase = torch_builtin(_lowerCamelCase )
lowercase = geluaa(_lowerCamelCase )
lowercase = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(_lowerCamelCase ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def UpperCamelCase_ ( self ):
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(_lowerCamelCase ):
get_activation('bogus' )
with self.assertRaises(_lowerCamelCase ):
get_activation(_lowerCamelCase )
def UpperCamelCase_ ( self ):
lowercase = get_activation('gelu' )
lowercase = 1
lowercase = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_lowerCamelCase ):
lowercase = acta.a
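

# Editorial sketch (not in the original file) of what the clipped-mask
# assertions above verify: "gelu_10" matches gelu but saturates at 10.
if __name__ == "__main__":
    x = torch.tensor([-100.0, 0.0, 5.0, 100.0])
    print(get_activation("gelu_10")(x))  # last entry is clipped to 10.0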
| 220
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase : Dict = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
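

# Editorial sketch (not in the original file) of the idea behind _LazyModule:
# a module-level __getattr__ (PEP 562) defers the heavy import until a symbol
# is first accessed, keeping `import` of the package cheap. `_lazy_map` below
# is an illustrative name, not part of this module.
#
#     import importlib
#
#     _lazy_map = {"json": ["dumps", "loads"]}
#
#     def __getattr__(name):
#         for module_name, names in _lazy_map.items():
#             if name in names:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(name)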
| 220
| 1
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_lowercase: int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
_lowercase: Tuple = "EGZWVONAHDCLFQMSIPJBYUKXTR"
_lowercase: List[str] = "FOBHMDKEXQNRAULPGSJVTYICZW"
_lowercase: Tuple = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
_lowercase: Dict = "RMDJXFUWGISLHVTCQNKYPBEZOA"
_lowercase: Tuple = "SGLCPQWZHKXAREONTFBVIYJUDM"
_lowercase: Dict = "HVSICLTYKQUBXDWAJZOMFGPREN"
_lowercase: Tuple = "RZWQHFMVDBKICJLNTUXAGYPSOE"
_lowercase: List[str] = "LFKIJODBEGAMQPXVUHYSTCZRWN"
_lowercase: Any = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pbstring: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check rotor positions and selection, and build the plugboard dict."""
    # Checks that three unique rotors were supplied
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pbstring)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Validate a plugboard string and build its symmetric letter-swap dict."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # str.replace returns a new string; keep it

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Create the symmetric dictionary (each pair maps both ways)
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt/decrypt text; identical settings turn the output back into the input."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # Positions are given as 1..26 but used as 0-based offsets
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
_lowercase: Optional[int] = "This is my Python script that emulates the Enigma machine from WWII."
_lowercase: Dict = (1, 1, 1)
_lowercase: Dict = "pictures"
_lowercase: Tuple = (rotora, rotora, rotora)
_lowercase: Optional[int] = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 352
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = ["image_processor", "tokenizer"]
__A = "ViTImageProcessor"
__A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ):
"""simple docstring"""
a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase_ , )
a = kwargs.pop("feature_extractor" )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __call__(self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
a = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if visual_prompt is not None:
a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if images is not None:
a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if visual_prompt is not None and images is not None:
a = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
a = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ )
def UpperCamelCase_ (self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase_ (self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase_ , )
return self.image_processor_class
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase_ , )
return self.image_processor
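

# Editorial sketch (not in the original file): this class mirrors transformers'
# CLIPSegProcessor; the checkpoint name below is an assumed example.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']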
| 71
| 0
|
UpperCAmelCase : Optional[int] = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 95
|
import logging
from transformers import PretrainedConfig
a__ = logging.getLogger(__name__)
a__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "bertabs"
def __init__( self , _a=3_0_5_2_2 , _a=5_1_2 , _a=6 , _a=5_1_2 , _a=8 , _a=5_1_2 , _a=0.2 , _a=6 , _a=7_6_8 , _a=8 , _a=2_0_4_8 , _a=0.2 , **_a , ) -> Any:
super().__init__(**_a )
_a : int = vocab_size
_a : List[str] = max_pos
_a : Tuple = enc_layers
_a : Optional[Any] = enc_hidden_size
_a : int = enc_heads
_a : Optional[Any] = enc_ff_size
_a : List[str] = enc_dropout
_a : Tuple = dec_layers
_a : Optional[Any] = dec_hidden_size
_a : Optional[Any] = dec_heads
_a : Optional[Any] = dec_ff_size
_a : List[Any] = dec_dropout
| 235
| 0
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
super().tearDown()
gc.collect()
def __UpperCAmelCase ( self ):
__a = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__a = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__a = controlnet_params
__a = '''bird'''
__a = jax.device_count()
__a = pipe.prepare_text_inputs([prompts] * num_samples )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
__a = pipe.prepare_image_inputs([canny_image] * num_samples )
__a = jax.random.PRNGKey(0 )
__a = jax.random.split(lowerCamelCase__ , jax.device_count() )
__a = replicate(lowerCamelCase__ )
__a = shard(lowerCamelCase__ )
__a = shard(lowerCamelCase__ )
__a = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=50 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a = images[0, 253:256, 253:256, -1]
__a = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
__a = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__a = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__a = controlnet_params
__a = '''Chef in the kitchen'''
__a = jax.device_count()
__a = pipe.prepare_text_inputs([prompts] * num_samples )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
__a = pipe.prepare_image_inputs([pose_image] * num_samples )
__a = jax.random.PRNGKey(0 )
__a = jax.random.split(lowerCamelCase__ , jax.device_count() )
__a = replicate(lowerCamelCase__ )
__a = shard(lowerCamelCase__ )
__a = shard(lowerCamelCase__ )
__a = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=50 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a = images[0, 253:256, 253:256, -1]
__a = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11
| 0
|
import base64
def baseaa_encode(string: str) -> bytes:
    # base85 encoding lives in the stdlib `base64` module
    return base64.b85encode(string.encode("utf-8") )
def baseaa_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded ).decode("utf-8" )
if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 59
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase( ProcessorMixin ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "AutoImageProcessor"
lowercase__ = "AutoTokenizer"
    def __init__( self: List[str], image_processor: List[str]=None, tokenizer: Tuple=None, **kwargs: Tuple ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor, tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self: Any, *args: Any, **kwargs: Tuple ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        images = kwargs.pop("""images""", None )
        text = kwargs.pop("""text""", None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self: Optional[int], *args: Tuple, **kwargs: List[str] ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self: int, *args: List[str], **kwargs: int ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
@contextmanager
    def as_target_processor( self: Dict ):
        '''simple docstring'''
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self: Dict, tokens: str, is_inner_value: bool=False, added_vocab: Optional[Any]=None ):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"""<s_(.*?)>""", tokens, re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, )
return self.image_processor_class
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, )
return self.image_processor
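# A minimal sketch of what the tag parser above produces (hypothetical tag names,
# not part of the original file); given a constructed processor:
#
#   processor.tokenajson("<s_menu><s_item>burger</s_item></s_menu>")
#   # -> {"menu": {"item": "burger"}}
#
# Nested <s_key>...</s_key> spans become nested dicts and <sep/> separates list items.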
| 64
| 0
|
def solution( n : int = 2000000 ) -> int:
    """simple docstring"""
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
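# Sanity check: the sum of all primes below 100 is 1060, so solution(100) == 1060.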
| 173
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target : int = 2000000 ) -> int:
    """simple docstring"""
    triangle_numbers : list[int] = [0]
    idx : int
    for idx in range(1, ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product : int = 0
    # the area corresponding to the grid that gives the product closest to target
    area : int = 0
    # an estimate of b, using the quadratic formula
    b_estimate : float
    # the largest integer less than b_estimate
    b_floor : int
    # the smallest integer greater than b_estimate
    b_ceil : int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess : int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess : int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
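# Background for the search above: an a x b grid contains T(a) * T(b) rectangles,
# where T(n) = n * (n + 1) / 2 is the n-th triangle number; e.g. a 3 x 2 grid
# contains T(3) * T(2) = 6 * 3 = 18 rectangles.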
| 173
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class _lowerCamelCase( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
lowercase_ : str = StableDiffusionLatentUpscalePipeline
lowercase_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
lowercase_ : Dict = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
lowercase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase_ : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ : Dict = frozenset([] )
lowercase_ : Dict = True
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : int = 1
_lowercase : List[str] = 4
_lowercase : Optional[Any] = (16, 16)
_lowercase : List[Any] = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(lowerCamelCase)
return image
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[Any] = UNetaDConditionModel(
act_fn='gelu', attention_head_dim=8, norm_num_groups=lowerCamelCase, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=1_60, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
), in_channels=8, mid_block_type=lowerCamelCase, only_cross_attention=lowerCamelCase, out_channels=5, resnet_time_scale_shift='scale_shift', time_embedding_type='fourier', timestep_post_act='gelu', up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'), )
_lowercase : Dict = AutoencoderKL(
block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
_lowercase : Optional[Any] = EulerDiscreteScheduler(prediction_type='sample')
_lowercase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, hidden_act='quick_gelu', projection_dim=5_12, )
_lowercase : Optional[Any] = CLIPTextModel(lowerCamelCase)
_lowercase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_lowercase : Optional[int] = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Union[str, Any]:
"""simple docstring"""
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[Any] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = 'cpu'
_lowercase : int = self.get_dummy_components()
_lowercase : Optional[Any] = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : int = self.get_dummy_inputs(lowerCamelCase)
_lowercase : List[Any] = pipe(**lowerCamelCase).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 2_56, 2_56, 3))
_lowercase : Union[str, Any] = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5])
_lowercase : List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[Any] = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Dict = self.pipeline_class(**lowerCamelCase)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs(lowerCamelCase)
_lowercase : List[Any] = 2
_lowercase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
_lowercase : int = getattr(lowerCamelCase, scheduler_enum.name)
_lowercase : Dict = scheduler_cls.from_config(pipe.scheduler.config)
_lowercase : str = pipe(**lowerCamelCase)[0]
outputs.append(lowerCamelCase)
assert check_same_shape(lowerCamelCase)
@require_torch_gpu
@slow
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Tuple = torch.manual_seed(33)
_lowercase : Optional[Any] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.floataa)
pipe.to('cuda')
_lowercase : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
upscaler.to('cuda')
_lowercase : List[str] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
_lowercase : Any = pipe(lowerCamelCase, generator=lowerCamelCase, output_type='latent').images
_lowercase : List[Any] = upscaler(
prompt=lowerCamelCase, image=lowerCamelCase, num_inference_steps=20, guidance_scale=0, generator=lowerCamelCase, output_type='np', ).images[0]
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
assert np.abs((expected_image - image).mean()) < 5E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = torch.manual_seed(33)
_lowercase : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
upscaler.to('cuda')
_lowercase : Any = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
_lowercase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
_lowercase : int = upscaler(
prompt=lowerCamelCase, image=lowerCamelCase, num_inference_steps=20, guidance_scale=0, generator=lowerCamelCase, output_type='np', ).images[0]
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
assert np.abs((expected_image - image).max()) < 5E-2
| 21
|
"""simple docstring"""
def lucas_lehmer_test(p: int ) -> bool:
    """simple docstring"""
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
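# Known values for reference: 2**7 - 1 = 127 is prime, so lucas_lehmer_test(7) is True,
# while 2**11 - 1 = 2047 = 23 * 89 is composite, so lucas_lehmer_test(11) is False.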
| 153
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355
|
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""")
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16)
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""")
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81
| 0
|
"""simple docstring"""
def solution( limit : int = 1_00_00_00 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(f'{solution() = }')
| 191
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
A_ :List[str] = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
A_ :Optional[Any] = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping( key ,file ) -> str:
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'.*layer_(\d*).*' ,file )[1] )
    layer_number -= 3
    return F'h.{layer_number}.' + key
def get_dtype_size( dtype ) -> Any:
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' ,str(dtype ) )
    if bit_search is None:
        raise ValueError(F'`dtype` is not a valid dtype: {dtype}.' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
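# For example, get_dtype_size(torch.float16) -> 2 (bytes) and
# get_dtype_size(torch.bool) -> 0.125, per the bit-width regex above.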
def convert_bloom_checkpoint_to_pytorch( a_ ,bloom_config_file ,pytorch_dump_folder_path ,shard_model ,pretraining_tp ) -> Dict:
# Construct model
if bloom_config_file == "":
__UpperCamelCase : List[Any] =BloomConfig()
else:
__UpperCamelCase : List[str] =BloomConfig.from_json_file(a_ )
if shard_model:
__UpperCamelCase : int =os.listdir(a_ )
        __UpperCamelCase : Union[str, Any] =sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s ,a_ ) )
__UpperCamelCase : Optional[Any] ={'weight_map': {}, 'metadata': {}}
__UpperCamelCase : Dict =0
__UpperCamelCase : int =None
__UpperCamelCase : Any =BloomConfig()
for j, file in enumerate(a_ ):
print('Processing file: {}'.format(a_ ) )
__UpperCamelCase : Optional[int] =None
for i in range(a_ ):
# load all TP files
__UpperCamelCase : Dict =file.replace('model_00' ,F'model_0{i}' )
__UpperCamelCase : Optional[Any] =torch.load(os.path.join(a_ ,a_ ) ,map_location='cpu' )
# Rename keys in the transformers names
__UpperCamelCase : int =list(temp.keys() )
for key in keys:
__UpperCamelCase : Dict =temp.pop(a_ )
if tensors is None:
__UpperCamelCase : Any =temp
else:
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__UpperCamelCase : List[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__UpperCamelCase : Any =torch.cat([tensors[key], temp[key]] ,dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__UpperCamelCase : Optional[Any] =tensors[key] / pretraining_tp
torch.save(
a_ ,os.path.join(
a_ ,'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) ,str(len(a_ ) ).zfill(5 ) ) ,) ,)
for key in tensors.keys():
__UpperCamelCase : Union[str, Any] =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__UpperCamelCase : int ='pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) ,str(len(a_ ) ).zfill(5 ) )
__UpperCamelCase : Union[str, Any] =BloomConfig()
__UpperCamelCase : Tuple =pytorch_dump_folder_path + '/' + CONFIG_NAME
__UpperCamelCase : Optional[int] =total_size
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a_ ,WEIGHTS_NAME + '.index.json' ) ,'w' ,encoding='utf-8' ) as f:
__UpperCamelCase : List[Any] =json.dumps(a_ ,indent=2 ,sort_keys=a_ ) + '\n'
f.write(a_ )
else:
__UpperCamelCase : List[Any] =BloomModel(a_ )
__UpperCamelCase : Optional[Any] =os.listdir(a_ )
        __UpperCamelCase : Dict =sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s ,a_ ) )
__UpperCamelCase : Any =None
for i, file in enumerate(a_ ):
__UpperCamelCase : Union[str, Any] =None
for i in range(a_ ):
# load all TP files
__UpperCamelCase : Optional[Any] =file.replace('model_00' ,F'model_0{i}' )
__UpperCamelCase : str =torch.load(os.path.join(a_ ,a_ ) ,map_location='cpu' )
# Rename keys in the transformers names
__UpperCamelCase : List[str] =list(temp.keys() )
for key in keys:
__UpperCamelCase : Union[str, Any] =temp.pop(a_ )
if tensors is None:
__UpperCamelCase : Optional[Any] =temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__UpperCamelCase : Optional[int] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__UpperCamelCase : int =torch.cat([tensors[key], temp[key]] ,dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__UpperCamelCase : Dict =tensors[key] / pretraining_tp
__UpperCamelCase : str =model.load_state_dict(a_ ,strict=a_ )
assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
__UpperCamelCase : str =set(other_keys.missing_keys )
else:
__UpperCamelCase : int =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(a_ ,exist_ok=a_ )
__UpperCamelCase : Optional[int] =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__UpperCamelCase : Dict =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
__UpperCamelCase : List[str] =model.to(config.torch_dtype )
torch.save(model.state_dict() ,a_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
A_ :str = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 71
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_A = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_A = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_A = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_A = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_A = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split( lowerCAmelCase ) -> Optional[Any]:
UpperCAmelCase__ : Optional[Any] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowerCAmelCase )
return [m.group(0 ) for m in matches]
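# For example, camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]; the
# lookarounds split at lower->UPPER and UPPER->Upper-lower boundaries.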
def get_frameworks_table( ) -> List[Any]:
UpperCAmelCase__ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase__ : Tuple = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase__ : List[Any] = collections.defaultdict(lowerCAmelCase )
UpperCAmelCase__ : int = collections.defaultdict(lowerCAmelCase )
UpperCAmelCase__ : Any = collections.defaultdict(lowerCAmelCase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowerCAmelCase ):
UpperCAmelCase__ : str = None
if _re_tf_models.match(lowerCAmelCase ) is not None:
UpperCAmelCase__ : int = tf_models
UpperCAmelCase__ : Any = _re_tf_models.match(lowerCAmelCase ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase ) is not None:
UpperCAmelCase__ : Union[str, Any] = flax_models
UpperCAmelCase__ : Union[str, Any] = _re_flax_models.match(lowerCAmelCase ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase ) is not None:
UpperCAmelCase__ : Tuple = pt_models
UpperCAmelCase__ : Optional[Any] = _re_pt_models.match(lowerCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase__ : Union[str, Any] = True
break
# Try again after removing the last word in the name
UpperCAmelCase__ : str = """""".join(camel_case_split(lowerCAmelCase )[:-1] )
UpperCAmelCase__ : Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase__ : Union[str, Any] = list(lowerCAmelCase )
all_models.sort()
UpperCAmelCase__ : List[str] = {"""model_type""": all_models}
UpperCAmelCase__ : int = [pt_models[t] for t in all_models]
UpperCAmelCase__ : Tuple = [tf_models[t] for t in all_models]
UpperCAmelCase__ : Tuple = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
UpperCAmelCase__ : Optional[Any] = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase__ : List[Any] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase__ : Optional[int] = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase__ : Any = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase__ : Optional[Any] = """AutoTokenizer"""
UpperCAmelCase__ : str = [processors[t] for t in all_models]
return pd.DataFrame(lowerCAmelCase )
def update_pipeline_and_auto_class_table( table ) -> List[str]:
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( lowerCAmelCase , commit_sha ) -> str:
UpperCAmelCase__ : Union[str, Any] = get_frameworks_table()
UpperCAmelCase__ : str = Dataset.from_pandas(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=lowerCAmelCase )
UpperCAmelCase__ : List[Any] = Dataset.from_json(lowerCAmelCase )
UpperCAmelCase__ : Dict = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(lowerCAmelCase ) )
}
UpperCAmelCase__ : List[Any] = update_pipeline_and_auto_class_table(lowerCAmelCase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
UpperCAmelCase__ : str = sorted(table.keys() )
UpperCAmelCase__ : str = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
UpperCAmelCase__ : Any = Dataset.from_pandas(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowerCAmelCase , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(lowerCAmelCase , """pipeline_tags.json""" ) )
if commit_sha is not None:
UpperCAmelCase__ : Optional[Any] = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
UpperCAmelCase__ : str = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=lowerCAmelCase , repo_type="""dataset""" , token=lowerCAmelCase , commit_message=lowerCAmelCase , )
def check_pipeline_tags( ) -> str:
UpperCAmelCase__ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase__ : Any = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase__ : List[Any] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase__ : Union[str, Any] = pipeline_tasks[key]["""pt"""]
if isinstance(lowerCAmelCase , (list, tuple) ):
UpperCAmelCase__ : Optional[Any] = model[0]
UpperCAmelCase__ : Optional[Any] = model.__name__
if model not in in_table.values():
missing.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
UpperCAmelCase__ : str = """, """.join(lowerCAmelCase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
_A = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 166
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance( lat1 , lon1 , lat2 , lon2 ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_demonimator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
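# The returned value is a distance in meters along the ellipsoid defined by
# AXIS_A / AXIS_B; for short arcs it stays close to the spherical
# haversine_distance used above to seed sigma.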
if __name__ == "__main__":
import doctest
doctest.testmod()
| 166
| 1
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path , articles: list ) -> Tuple:
    """simple docstring"""
    content = '''\n'''.join(articles )
    Path(path ).open('''w''' ).writelines(content )
snake_case_ : Union[str, Any] = "patrickvonplaten/t5-tiny-random"
snake_case_ : int = "sshleifer/bart-tiny-random"
snake_case_ : List[str] = "sshleifer/tiny-mbart"
snake_case_ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class __snake_case ( TestCasePlus ):
def lowerCamelCase ( self : List[str] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
UpperCAmelCase_ = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase_ = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(_snake_case , _snake_case)
UpperCAmelCase_ = str(Path(self.get_auto_remove_tmp_dir()) / '''scores.json''')
UpperCAmelCase_ = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase_ = F"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(_snake_case , '''argv''' , _snake_case):
run_generate()
assert Path(_snake_case).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
self.run_eval_tester(_snake_case)
@parameterized.expand([BART_TINY, MBART_TINY])
@slow
def lowerCamelCase ( self : List[Any] , _snake_case : Tuple):
"""simple docstring"""
self.run_eval_tester(_snake_case)
@parameterized.expand([T5_TINY, MBART_TINY])
@slow
def lowerCamelCase ( self : str , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
UpperCAmelCase_ = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase_ = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
UpperCAmelCase_ = Path(self.get_auto_remove_tmp_dir())
UpperCAmelCase_ = str(tmp_dir / '''scores.json''')
UpperCAmelCase_ = str(tmp_dir / '''val.target''')
_dump_articles(_snake_case , text['''en'''])
_dump_articles(_snake_case , text['''de'''])
UpperCAmelCase_ = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase_ = F"""
run_eval_search.py
{model}
{str(_snake_case)}
{str(_snake_case)}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''])
with patch.object(_snake_case , '''argv''' , _snake_case):
with CaptureStdout() as cs:
run_search()
UpperCAmelCase_ = [''' num_beams | length_penalty''', model, '''Best score args''']
UpperCAmelCase_ = ['''Info''']
if "translation" in task:
expected_strings.append('''bleu''')
else:
expected_strings.extend(_snake_case)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_snake_case).exists()
os.remove(Path(_snake_case))
| 51
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str] ) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str] , **format_kwargs ) -> Formatter:
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got '{format_type}'" )
| 11
| 0
|
"""simple docstring"""
def create_ngram( sentence: str , ngram_size: int ):
    '''simple docstring'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 352
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__( self , data ):
        self.data = data
        self.next = None
    def __repr__( self ):
        return f'Node({self.data})'
class LinkedList:
    def __init__( self ):
        self.head = None
    def __iter__( self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
        if not 0 <= index < len(self ):
            raise ValueError("""list index out of range.""" )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        if not 0 <= index < len(self ):
            raise ValueError("""list index out of range.""" )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if not 0 <= index <= len(self ):
            raise IndexError("""list index out of range""" )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ):  # print every node data
        print(self )
    def delete_head( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError("""List index out of range.""" )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        return self.head is None
    def reverse( self ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
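# A quick sketch of the behavior above (not part of the original file):
#
#   ll = LinkedList()
#   for value in (1, 2, 3):
#       ll.insert_tail(value)
#   ll.reverse()
#   assert str(ll) == "3->2->1"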
def test_singly_linked_list() -> None:
    '''simple docstring'''
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(1_0 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i ,i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 ,1_1 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(1_1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 ,1_2 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 1_0
    assert linked_list.delete_tail() == 1_1
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 ,1_0 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
    for i in range(0 ,9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 ,1 ) )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : int = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"""dlrow olleH""",
7,
5_5_5_5,
0,
-192.55555,
"""Hello, world!""",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
UpperCamelCase : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(snake_case_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(snake_case_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCamelCase : Dict = linked_list.delete_head()
assert result == -9
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCamelCase : int = linked_list.delete_tail()
assert result == 12.2
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCamelCase : Optional[Any] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(snake_case_ )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(snake_case_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def A_ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
UpperCamelCase : List[Any] = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(snake_case_ )
print("""\nReading/changing Node data using indexing:""" )
print(f'Element at Position 1: {linked_list[1]}' )
UpperCamelCase : List[Any] = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(snake_case_ )
print(f'length of linked_list is : {len(snake_case_ )}' )
if __name__ == "__main__":
main()
| 27
| 0
|
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer, last_epoch=-1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    rules_dict = {}
    rule_list = step_rules.split(""",""")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(""":""")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''')

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
_UpperCAmelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''')

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''')

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
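# A short usage sketch of the dispatcher above. Upstream this module is
# `diffusers.optimization`, so the import would be
# `from diffusers.optimization import get_scheduler` -- treat that path as an
# assumption. The scheduler is stepped once per optimizer step.
def _scheduler_demo():
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler(
        'linear',                  # SchedulerType.LINEAR
        optimizer,
        num_warmup_steps=100,      # LR ramps up over the first 100 steps
        num_training_steps=1000,   # then decays linearly to 0 by step 1000
    )
    for _ in range(1000):
        optimizer.step()
        scheduler.step()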
"""simple docstring"""
def find_minimum_change(denominations, value):
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as many times as it still fits
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
_UpperCAmelCase = []
_UpperCAmelCase = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
_UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
_UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
_UpperCAmelCase = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
_UpperCAmelCase = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f"""Following is minimal change for {value}: """)
_UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'sew-d'
def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=768 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=3072 , UpperCAmelCase_=2 , UpperCAmelCase_=512 , UpperCAmelCase_=256 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=("p2c", "c2p") , UpperCAmelCase_="layer_norm" , UpperCAmelCase_="gelu_python" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-7 , UpperCAmelCase_=1E-5 , UpperCAmelCase_="group" , UpperCAmelCase_="gelu" , UpperCAmelCase_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase_=False , UpperCAmelCase_=128 , UpperCAmelCase_=16 , UpperCAmelCase_=True , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="mean" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=256 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , **UpperCAmelCase_ , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
lowerCamelCase : Any = hidden_size
lowerCamelCase : Any = feat_extract_norm
lowerCamelCase : List[str] = feat_extract_activation
lowerCamelCase : str = list(UpperCAmelCase_ )
lowerCamelCase : Any = list(UpperCAmelCase_ )
lowerCamelCase : str = list(UpperCAmelCase_ )
lowerCamelCase : List[Any] = conv_bias
lowerCamelCase : Optional[int] = num_conv_pos_embeddings
lowerCamelCase : str = num_conv_pos_embedding_groups
lowerCamelCase : Optional[int] = len(self.conv_dim )
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : str = squeeze_factor
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : List[Any] = position_buckets
lowerCamelCase : Union[str, Any] = share_att_key
lowerCamelCase : Optional[int] = relative_attention
lowerCamelCase : Tuple = norm_rel_ebd
lowerCamelCase : Union[str, Any] = list(UpperCAmelCase_ )
lowerCamelCase : List[Any] = hidden_act
lowerCamelCase : Optional[Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_dropout
lowerCamelCase : List[Any] = attention_dropout
lowerCamelCase : Optional[Any] = activation_dropout
lowerCamelCase : List[str] = feat_proj_dropout
lowerCamelCase : List[str] = final_dropout
lowerCamelCase : str = layer_norm_eps
lowerCamelCase : int = feature_layer_norm_eps
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase : Any = apply_spec_augment
lowerCamelCase : Optional[int] = mask_time_prob
lowerCamelCase : Optional[Any] = mask_time_length
lowerCamelCase : str = mask_time_min_masks
lowerCamelCase : List[Any] = mask_feature_prob
lowerCamelCase : int = mask_feature_length
lowerCamelCase : List[Any] = mask_feature_min_masks
# ctc loss
lowerCamelCase : Optional[Any] = ctc_loss_reduction
lowerCamelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
lowerCamelCase : Optional[Any] = use_weighted_layer_sum
lowerCamelCase : Dict = classifier_proj_size
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
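# The property above multiplies the convolutional strides together: it is the
# overall downsampling factor between raw waveform samples and encoder frames.
# A quick sanity check for the default strides:
if __name__ == "__main__":
    import functools
    import operator

    strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # default conv_stride
    ratio = functools.reduce(operator.mul, strides, 1)
    print(ratio)  # 320 -> one encoder frame per 320 samples (20 ms at 16 kHz)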
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic, ):
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated total cost: path cost so far plus heuristic to the goal
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
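# Design note: the open list above is re-sorted and reversed on every
# expansion, which costs O(n log n) each time. The usual alternative is a
# binary heap via the standard `heapq` module, which pops the lowest-f cell in
# O(log n). A minimal sketch of the same bookkeeping:
def _heap_open_list_demo():
    import heapq

    open_list = []  # heap of [f, g, x, y] cells, cheapest f first
    heapq.heappush(open_list, [7, 0, 0, 0])
    heapq.heappush(open_list, [3, 1, 1, 0])
    heapq.heappush(open_list, [5, 1, 0, 1])
    while open_list:
        f, g, x, y = heapq.heappop(open_list)  # no sort()/reverse() needed
        print(f, (x, y))  # 3 (1, 0) / 5 (0, 1) / 7 (0, 0)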
"""simple docstring"""
def solution(power: int = 1000) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
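# Because Python integers have arbitrary precision, the same digit sum
# (Project Euler problem 16 for the default power of 1000) can be
# cross-checked by round-tripping through a string:
def digit_sum_via_str(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))


assert digit_sum_via_str(15) == 26  # 2**15 = 32768 -> 3 + 2 + 7 + 6 + 8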
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase_ : List[str] = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
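# For context, `_LazyModule` defers the heavy imports above until an attribute
# is first accessed. A minimal sketch of the same idea using module-level
# `__getattr__` (PEP 562) -- an illustration only, not the actual `_LazyModule`
# implementation:
#
#     import importlib
#
#     _LAZY = {
#         "Speech2TextConfig": ".configuration_speech_to_text",
#         "Speech2TextProcessor": ".processing_speech_to_text",
#     }
#
#     def __getattr__(name):
#         # Called only when `name` is not found normally; import on first use.
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")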
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')

    characters = sorted(string.lower())
    return len(characters) == len(set(characters))
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = input("Enter a string ").strip()
SCREAMING_SNAKE_CASE : Any = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
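# For reference, an isogram is a word with no repeated letters, compared
# case-insensitively. Two spot checks against the function above:
#
#     >>> is_isogram("Uncopyrightable")   # all 15 letters distinct
#     True
#     >>> is_isogram("allowance")         # repeats 'a' and 'l'
#     False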
from __future__ import annotations
def peak(lst: list[int]) -> int:
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if the middle element is a peak, return it
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on the right half
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # otherwise decreasing: recurse on the left half
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
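# The function above is a divide-and-conquer peak finder: each call halves the
# search range, so it runs in O(log n) on a list that strictly increases and
# then strictly decreases (a "mountain" array). Two spot checks:
assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
assert peak([1, 10, 9, 8, 7]) == 10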
'''simple docstring'''
import argparse
import os
import re
lowerCamelCase = """src/diffusers"""
# Pattern that looks at the indentation in a line.
lowerCamelCase = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCamelCase = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCamelCase = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCamelCase = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCamelCase = re.compile(R"""\[([^\]]+)\]""")
def get_indent(line):
    """Return the indentation prefix of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _A ( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ):
"""simple docstring"""
__lowercase =0
__lowercase =code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_lowerCAmelCase ):
index += 1
__lowercase =['\n'.join(lines[:index] )]
else:
__lowercase =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase =[lines[index]]
index += 1
while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_lowerCAmelCase ) )
if index < len(_lowerCAmelCase ) - 1:
__lowercase =[lines[index + 1]]
index += 1
else:
__lowercase =[]
else:
blocks.append('\n'.join(_lowerCAmelCase ) )
__lowercase =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCAmelCase ) > 0:
blocks.append('\n'.join(_lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCAmelCase ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap `key` so that case and leading underscores are ignored when sorting."""

    def _inner(x):
        return key(x).lower().replace('_', '')

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects`: constants first, then classes, then functions, leading underscores ignored."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sort the imports listed inside one `_import_structure` statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"""[{imports}]"""
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` decides whether to fix or just report."""
    with open(file, 'r') as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"""Overwriting {file}.""")
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f"""Would overwrite {len(failures)} files, run `make style`.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
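# The ordering rule implemented by `sort_objects` above: constants (ALL_CAPS)
# first, then classes (Capitalized), then functions (lowercase), each group
# sorted alphabetically with leading underscores ignored. An illustration:
#
#     >>> sort_objects(["zeta_fn", "BetaClass", "ALPHA_CONST", "_private_fn"])
#     ['ALPHA_CONST', 'BetaClass', '_private_fn', 'zeta_fn']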
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCamelCase = float("""nan""")
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
        self.stdout = sys.stdout
        self.file = open(_lowerCAmelCase , 'a')
def __getattr__( self : Any , _lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
return getattr(self.stdout , _lowerCAmelCase)
def __lowerCamelCase ( self : str , _lowerCAmelCase : int):
'''simple docstring'''
self.stdout.write(_lowerCAmelCase)
# strip tqdm codes
self.file.write(re.sub(R'^.*\r' , '' , _lowerCAmelCase , 0 , re.M))
def _A ( _lowerCAmelCase=80 , _lowerCAmelCase=False ):
"""simple docstring"""
__lowercase =[]
# deal with critical env vars
__lowercase =['CUDA_VISIBLE_DEVICES']
for key in env_keys:
__lowercase =os.environ.get(_lowerCAmelCase , _lowerCAmelCase )
if val is not None:
cmd.append(f"""{key}={val}""" )
# python executable (not always needed if the script is executable)
__lowercase =sys.executable if full_python_path else sys.executable.split('/' )[-1]
cmd.append(_lowerCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__lowercase =[]
__lowercase =''
while len(_lowerCAmelCase ) > 0:
current_line += f"""{cmd.pop(0 )} """
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_lowerCAmelCase )
__lowercase =''
return "\\\n".join(_lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =re.sub(r'[\\\n]+' , ' ' , args.base_cmd )
# remove --output_dir if any and set our own
    __lowercase =re.sub(r'--output_dir\s+[^\s]+' , '' , args.base_cmd )
args.base_cmd += f""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
    __lowercase =re.sub(r'--overwrite_output_dir\s+' , '' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
    if 0:  # debug branch: flip to 1 to skip the real run and return fake metrics
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
__lowercase =subprocess.run(_lowerCAmelCase , capture_output=_lowerCAmelCase , text=_lowerCAmelCase )
if verbose:
print('STDOUT' , result.stdout )
print('STDERR' , result.stderr )
# save the streams
__lowercase =variation.replace(' ' , '-' )
with open(Path(_lowerCAmelCase ) / f"""log.{prefix}.stdout.txt""" , 'w' ) as f:
f.write(result.stdout )
with open(Path(_lowerCAmelCase ) / f"""log.{prefix}.stderr.txt""" , 'w' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('failed' )
return {target_metric_key: nan}
with io.open(f"""{output_dir}/all_results.json""" , 'r' , encoding='utf-8' ) as f:
__lowercase =json.load(_lowerCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
"""simple docstring"""
__lowercase =[]
__lowercase =[]
__lowercase =f"""{id}: {variation:<{longest_variation_len}}"""
__lowercase =f"""{preamble}: """
__lowercase =set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_lowerCAmelCase ) , desc=_lowerCAmelCase , leave=_lowerCAmelCase ):
__lowercase =process_run_single(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase =single_run_metrics[target_metric_key]
if not math.isnan(_lowerCAmelCase ):
metrics.append(_lowerCAmelCase )
results.append(_lowerCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__lowercase =f"""\33[2K\r{outcome}"""
if len(_lowerCAmelCase ) > 0:
__lowercase ={k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__lowercase =round(mean_metrics[target_metric_key] , 2 )
__lowercase =f"""{outcome} {mean_target}"""
if len(_lowerCAmelCase ) > 1:
results_str += f""" {tuple(round(_lowerCAmelCase , 2 ) for x in results )}"""
print(_lowerCAmelCase )
__lowercase =variation
return mean_metrics
else:
print(_lowerCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def _A ( ):
"""simple docstring"""
__lowercase =torch.cuda.get_device_properties(torch.device('cuda' ) )
return f"""
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =pd.DataFrame(_lowerCAmelCase )
__lowercase ='variation'
__lowercase ='diff_%'
__lowercase =nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__lowercase =df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_lowerCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__lowercase =df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_lowerCAmelCase ):
__lowercase =df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='columns' , )
# re-order columns
__lowercase =[variation_key, target_metric_key, diff_key, *report_metric_keys]
__lowercase =df.reindex(_lowerCAmelCase , axis='columns' ) # reorder cols
# capitalize
__lowercase =df.rename(str.capitalize , axis='columns' )
# make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace('_' , '<br>' ) , axis='columns' )
    df_console = df.rename(lambda c : c.replace('_' , '\n' ) , axis='columns' )
__lowercase =['', 'Copy between the cut-here-lines and paste as is to github or a forum']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_lowerCAmelCase , floatfmt='.2f' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_lowerCAmelCase , floatfmt='.2f' )]
print('\n\n'.join(_lowerCAmelCase ) )
def _A ( ):
"""simple docstring"""
__lowercase =argparse.ArgumentParser()
parser.add_argument(
'--base-cmd' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Base cmd' , )
parser.add_argument(
'--variations' , default=_lowerCAmelCase , type=_lowerCAmelCase , nargs='+' , required=_lowerCAmelCase , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , )
parser.add_argument(
'--base-variation' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , )
parser.add_argument(
'--target-metric-key' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
parser.add_argument(
'--report-metric-keys' , default='' , type=_lowerCAmelCase , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples' , )
parser.add_argument(
'--repeat-times' , default=1 , type=_lowerCAmelCase , help='How many times to re-run each variation - an average will be reported' , )
parser.add_argument(
'--output_dir' , default='output_benchmark' , type=_lowerCAmelCase , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
parser.add_argument(
'--verbose' , default=_lowerCAmelCase , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
__lowercase =parser.parse_args()
__lowercase =args.output_dir
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
__lowercase =get_base_command(_lowerCAmelCase , _lowerCAmelCase )
# split each dimension into its --foo variations
__lowercase =[list(map(str.strip , re.split(r'\|' , _lowerCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__lowercase =list(map(str.strip , map(' '.join , itertools.product(*_lowerCAmelCase ) ) ) )
__lowercase =max(len(_lowerCAmelCase ) for x in variations )
# split wanted keys
__lowercase =args.report_metric_keys.split()
# capture prints into a log file for convenience
__lowercase =f"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"""
print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(f"""and this script's output is also piped into {report_fn}""" )
__lowercase =Tee(_lowerCAmelCase )
print(f"""\n*** Running {len(_lowerCAmelCase )} benchmarks:""" )
print(f"""Base command: {' '.join(_lowerCAmelCase )}""" )
__lowercase ='variation'
__lowercase =[]
for id, variation in enumerate(tqdm(_lowerCAmelCase , desc='Total completion: ' , leave=_lowerCAmelCase ) ):
__lowercase =base_cmd + variation.split()
results.append(
process_run(
id + 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.target_metric_key , _lowerCAmelCase , args.repeat_times , _lowerCAmelCase , args.verbose , ) )
process_results(_lowerCAmelCase , args.target_metric_key , _lowerCAmelCase , args.base_variation , _lowerCAmelCase )
if __name__ == "__main__":
main()
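# The variation expansion described in the header comment is a plain cartesian
# product; a quick sketch of how the six example combinations arise:
def _variation_expansion_demo():
    import itertools

    dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
    variations = [" ".join(parts).strip() for parts in itertools.product(*dims)]
    print(variations)
    # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
    #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']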
import numpy as np
import datasets
__A ='''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
__A ='''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
__A ='''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute( self , X , reference_distribution ):
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )

        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()

        return {"mahalanobis": mahal_dist}
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
__A =logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Train language if it is different from the evaluation language.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowerCamelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = train_dataset.features["label"].names
if training_args.do_eval:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = eval_dataset.features["label"].names
if training_args.do_predict:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = predict_dataset.features["label"].names
# Labels
lowerCamelCase_ = len(lowerCamelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , id2label={str(i ): label for i, label in enumerate(lowerCamelCase__ )} , label2id={label: i for i, label in enumerate(lowerCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
def preprocess_function(lowerCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=lowerCamelCase__ , max_length=data_args.max_seq_length , truncation=lowerCamelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
lowerCamelCase_ = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
lowerCamelCase_ = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_eval_samples )
lowerCamelCase_ = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
lowerCamelCase_ = eval_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_predict_samples )
lowerCamelCase_ = predict_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
lowerCamelCase_ = predict_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
lowerCamelCase_ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
return metric.compute(predictions=lowerCamelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
    elif training_args.fp16:
lowerCamelCase_ = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    metrics = train_result.metrics
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
    )
    metrics["train_samples"] = min(max_train_samples, len(train_dataset))

    trainer.save_model()  # Saves the tokenizer too for easy upload
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    logger.info("*** Evaluate ***")
    metrics = trainer.evaluate(eval_dataset=eval_dataset)

    max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
    metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
    logger.info("*** Predict ***")
    predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

    max_predict_samples = (
        data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
    )
    metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

    trainer.log_metrics("predict", metrics)
    trainer.save_metrics("predict", metrics)

    predictions = np.argmax(predictions, axis=1)
    output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
    if trainer.is_world_process_zero():
        with open(output_predict_file, "w") as writer:
            writer.write("index\tprediction\n")
            for index, item in enumerate(predictions):
                item = label_list[item]
                writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
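# A hedged usage sketch (the flags below come from the standard HF example-script
# argument dataclasses; verify against your local copy of run_xnli.py):
#     python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#         --language de --train_language en --do_train --do_eval \
#         --per_device_train_batch_size 32 --output_dir /tmp/debug_xnli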
| 47
| 1
|
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
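# A minimal usage sketch (hedged: this reader is normally reached through the
# public `load_dataset` entry point rather than instantiated directly):
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files={"train": "corpus.txt"})
#     print(ds["train"][0])  # {'text': '<first line of corpus.txt>'}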
| 46
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
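# (Illustrative note: the multi-GPU test above shells out to
#     torchrun --nproc_per_node=<num_gpus> test_metrics.py
# so the metric checks run under torch.distributed, while the CPU tests exercise
# the same entry point in-process via `debug_launcher`.)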
| 27
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) SDE scheduler for score-based generative models."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
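# For reference, `step_pred` above is an Euler-Maruyama discretization of the
# reverse-time VP-SDE from Song et al., "Score-Based Generative Modeling through
# Stochastic Differential Equations":
#     dx = [-0.5 * beta(t) * x - beta(t) * score(x, t)] dt + sqrt(beta(t)) dW
# with beta(t) = beta_min + t * (beta_max - beta_min), and the raw model output
# rescaled by the marginal std sigma(t) = sqrt(1 - exp(2 * log_mean_coeff(t))).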
| 348
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Find the median of the merged sorted arrays.

    >>> median_of_two_arrays([1, 3], [2])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 348
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
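# A hedged usage sketch of the prompt helpers above (the prompt text is illustrative):
#     use_cpu = _ask_field(
#         "Do you want to run on CPU only? [yes/NO]: ",
#         _convert_yes_no_to_bool,
#         default=False,
#         error_message="Please enter yes or no.",
#     )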
| 205
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """
    Build a (ksize x ksize) Gabor kernel for the given orientation and scale parameters.

    >>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).shape
    (3, 3)
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
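# The kernel above is the standard real-valued Gabor filter
#     g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# with rotated coordinates x' = x*cos(theta) + y*sin(theta) and
# y' = -x*sin(theta) + y*cos(theta).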
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowercase_ = imread('../image_data/lena.jpg')
# turn image in gray scale value
lowercase_ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowercase_ = np.zeros(gray.shape[:2])
for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
lowercase_ = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowercase_ = out / out.max() * 2_5_5
lowercase_ = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 205
| 1
|
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue):
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue, time_slice):
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self):
        # all queues except the last one run the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case__ : Optional[int] = Process('''P1''', 0, 53)
snake_case__ : Any = Process('''P2''', 0, 17)
snake_case__ : Optional[Any] = Process('''P3''', 0, 68)
snake_case__ : int = Process('''P4''', 0, 24)
snake_case__ : Dict = 3
snake_case__ : List[Any] = [17, 25]
snake_case__ : int = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
snake_case__ : str = Process('''P1''', 0, 53)
snake_case__ : str = Process('''P2''', 0, 17)
snake_case__ : str = Process('''P3''', 0, 68)
snake_case__ : Tuple = Process('''P4''', 0, 24)
snake_case__ : Optional[Any] = 3
snake_case__ : List[Any] = [17, 25]
snake_case__ : Optional[int] = deque([Pa, Pa, Pa, Pa])
snake_case__ : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case__ : int = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 369
|
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 274
| 0
|
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
def __init__( self , *__A , **__A ) -> int:
super().__init__(*__A , **__A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A=None , __A=None , __A=None , **__A , ) -> str:
lowerCAmelCase_ :List[str] = {}
if truncation is not None:
lowerCAmelCase_ :List[Any] = truncation
lowerCAmelCase_ :Optional[int] = generate_kwargs
lowerCAmelCase_ :Optional[int] = {}
if return_tensors is not None and return_type is None:
lowerCAmelCase_ :Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowerCAmelCase_ :Any = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase_ :str = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase_ :int = self.tokenizer.encode(__A , add_special_tokens=__A )
if len(__A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowerCAmelCase_ :Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[Any]:
return True
def __lowerCAmelCase ( self , *__A , __A ) -> List[Any]:
lowerCAmelCase_ :List[str] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
lowerCAmelCase_ :Dict = ([prefix + arg for arg in args[0]],)
lowerCAmelCase_ :Optional[int] = True
elif isinstance(args[0] , __A ):
lowerCAmelCase_ :Any = (prefix + args[0],)
lowerCAmelCase_ :Optional[Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowerCAmelCase_ :List[str] = self.tokenizer(*__A , padding=__A , truncation=__A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *__A , **__A ) -> Optional[int]:
lowerCAmelCase_ :Any = super().__call__(*__A , **__A )
if (
isinstance(args[0] , __A )
and all(isinstance(__A , __A ) for el in args[0] )
and all(len(__A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __lowerCAmelCase ( self , __A , __A=TruncationStrategy.DO_NOT_TRUNCATE , **__A ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = self._parse_and_tokenize(__A , truncation=__A , **__A )
return inputs
def __lowerCAmelCase ( self , __A , **__A ) -> str:
if self.framework == "pt":
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = tf.shape(model_inputs["""input_ids"""] ).numpy()
lowerCAmelCase_ :Optional[Any] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
lowerCAmelCase_ :Union[str, Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
lowerCAmelCase_ :Optional[Any] = self.model.generate(**__A , **__A )
lowerCAmelCase_ :Optional[Any] = output_ids.shape[0]
if self.framework == "pt":
lowerCAmelCase_ :str = output_ids.reshape(__A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowerCAmelCase_ :Tuple = tf.reshape(__A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __lowerCAmelCase ( self , __A , __A=ReturnType.TEXT , __A=False ) -> List[str]:
lowerCAmelCase_ :int = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowerCAmelCase_ :Optional[Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowerCAmelCase_ :str = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A , )
}
records.append(__A )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"
def __call__( self , *__A , **__A ) -> Tuple:
return super().__call__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"
def __lowerCAmelCase ( self , __A , __A , __A ) -> Dict:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def __lowerCAmelCase ( self , *__A , __A=TruncationStrategy.DO_NOT_TRUNCATE , __A=None , __A=None ) -> Union[str, Any]:
if getattr(self.tokenizer , """_build_translation_inputs""" , __A ):
return self.tokenizer._build_translation_inputs(
*__A , return_tensors=self.framework , truncation=__A , src_lang=__A , tgt_lang=__A )
else:
return super()._parse_and_tokenize(*__A , truncation=__A )
def __lowerCAmelCase ( self , __A=None , __A=None , **__A ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = super()._sanitize_parameters(**__A )
if src_lang is not None:
lowerCAmelCase_ :List[str] = src_lang
if tgt_lang is not None:
lowerCAmelCase_ :Tuple = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowerCAmelCase_ :Dict = kwargs.get("""task""" , self.task )
lowerCAmelCase_ :Any = task.split("""_""" )
if task and len(__A ) == 4:
# translation, XX, to YY
lowerCAmelCase_ :Optional[int] = items[1]
lowerCAmelCase_ :Optional[Any] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *__A , **__A ) -> str:
return super().__call__(*__A , **__A )
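# A hedged usage sketch of the task pipelines defined above (model names are
# only examples):
#     from transformers import pipeline
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     summarizer("Long article text ...", max_length=60, min_length=10)
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     translator("How are you?")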
| 84
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
lowerCAmelCase_ :int = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :List[Any] = jax.device_count()
lowerCAmelCase_ :Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A )
lowerCAmelCase_ :Optional[Any] = replicate(__A )
lowerCAmelCase_ :Union[str, Any] = shard(__A )
lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() )
lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1]
lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2"""
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
__A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , )
lowerCAmelCase_ :Optional[int] = scheduler_params
lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Tuple = jax.device_count()
lowerCAmelCase_ :str = num_samples * [prompt]
lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A )
lowerCAmelCase_ :Tuple = replicate(__A )
lowerCAmelCase_ :Optional[int] = shard(__A )
lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 )
lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() )
lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1]
lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
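# (Illustrative note on the pattern used above: `replicate` copies the pipeline
# parameters to every local device, and `shard` reshapes a host batch of size N
# into (jax.device_count(), N // jax.device_count(), ...) so the jitted pipeline
# call runs data-parallel across devices, one PRNG key per device.)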
| 84
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
snake_case__ : List[str] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase ,cache_dir=__lowercase )
snake_case__ : Union[str, Any] = [t[-1] for t in os.walk(os.path.join(__lowercase ,os.listdir(__lowercase )[0] ,'''snapshots''' ) )]
snake_case__ : Optional[Any] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase )
snake_case__ : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Optional[Any] = jax.random.PRNGKey(0 )
snake_case__ : Union[str, Any] = 4
snake_case__ : Union[str, Any] = jax.device_count()
snake_case__ : Tuple = num_samples * [prompt]
snake_case__ : Optional[int] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Optional[int] = replicate(__lowercase )
snake_case__ : Optional[int] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Union[str, Any] = shard(__lowercase )
snake_case__ : List[str] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.151_4745 ) < 1e-3
assert np.abs(np.abs(__lowercase ,dtype=np.floataa ).sum() - 4_9947.875 ) < 5e-1
snake_case__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''flax''' ,safety_checker=__lowercase )
snake_case__ : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : int = jax.random.PRNGKey(0 )
snake_case__ : Tuple = 5_0
snake_case__ : Union[str, Any] = jax.device_count()
snake_case__ : List[str] = num_samples * [prompt]
snake_case__ : Union[str, Any] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Union[str, Any] = replicate(__lowercase )
snake_case__ : Optional[int] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : List[str] = shard(__lowercase )
snake_case__ : List[str] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0565_2401) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 238_3808.2) ) < 5e-1
def __lowerCamelCase ( self :Tuple ):
snake_case__ , snake_case__ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase )
snake_case__ : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Any = jax.random.PRNGKey(0 )
snake_case__ : Optional[Any] = 5_0
snake_case__ : Dict = jax.device_count()
snake_case__ : Dict = num_samples * [prompt]
snake_case__ : Union[str, Any] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : str = replicate(__lowercase )
snake_case__ : Any = jax.random.split(__lowercase ,__lowercase )
snake_case__ : str = shard(__lowercase )
snake_case__ : Union[str, Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ , snake_case__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa )
snake_case__ : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Dict = jax.random.PRNGKey(0 )
snake_case__ : Dict = 5_0
snake_case__ : Tuple = jax.device_count()
snake_case__ : Dict = num_samples * [prompt]
snake_case__ : Any = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : List[str] = replicate(__lowercase )
snake_case__ : List[str] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : str = shard(__lowercase )
snake_case__ : List[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Optional[Any] = FlaxDDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,set_alpha_to_one=__lowercase ,steps_offset=1 ,)
snake_case__ , snake_case__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,scheduler=__lowercase ,safety_checker=__lowercase ,)
snake_case__ : Dict = scheduler.create_state()
snake_case__ : Any = scheduler_state
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : int = jax.random.PRNGKey(0 )
snake_case__ : int = 5_0
snake_case__ : int = jax.device_count()
snake_case__ : Tuple = num_samples * [prompt]
snake_case__ : Optional[int] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Union[str, Any] = replicate(__lowercase )
snake_case__ : Tuple = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Optional[Any] = shard(__lowercase )
snake_case__ : List[str] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 234_7693.5) ) < 5e-1
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Tuple = jax.device_count()
snake_case__ : Dict = num_samples * [prompt]
snake_case__ : List[str] = jax.random.split(jax.random.PRNGKey(0 ) ,__lowercase )
snake_case__ , snake_case__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase ,)
snake_case__ : Union[str, Any] = replicate(__lowercase )
snake_case__ : int = pipeline.prepare_inputs(__lowercase )
snake_case__ : Optional[int] = shard(__lowercase )
snake_case__ : int = pipeline(__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
snake_case__ , snake_case__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase ,use_memory_efficient_attention=__lowercase ,)
snake_case__ : str = replicate(__lowercase )
snake_case__ : List[Any] = pipeline.prepare_inputs(__lowercase )
snake_case__ : Tuple = shard(__lowercase )
snake_case__ : List[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case__ : str = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 44
|
def least_divisible_repunit(divisor: int) -> int:
    """
    Return the least value k such that the repunit R(k) = (10**k - 1) // 9 is
    divisible by `divisor`, or 0 if no such k exists (i.e. `divisor` shares a
    factor with 10).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least value of n for which A(n) = least_divisible_repunit(n) first exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
| 44
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
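# (Illustrative note: each try/except block above degrades gracefully -- when an
# optional dependency is missing, the corresponding import is replaced by a dummy
# object from `utils.dummy_*_objects` that raises an informative ImportError only
# when the class is actually instantiated.)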
| 47
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."})
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"})
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."})
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."})
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."})
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    # If we pass only one argument to the script and it's the path to a json file,
    # let's parse it to get our arguments.
    model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
ds = load_dataset(
    data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

# If we don't have a validation split, split off a percentage of train as validation.
data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
    split = ds["train"].train_test_split(data_args.train_val_split)
    ds["train"] = split["train"]
    ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
    "cache_dir": model_args.cache_dir,
    "revision": model_args.model_revision,
    "use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
    config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
    config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
    config = ViTMAEConfig()
    logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
    logger.info(f"Overriding config: {model_args.config_overrides}")
    config.update_from_string(model_args.config_overrides)
    logger.info(f"New config: {config}")
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
    image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
elif model_args.model_name_or_path:
    image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
    image_processor = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
    model = ViTMAEForPreTraining.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
    logger.info("Training new model from scratch")
    model = ViTMAEForPreTraining(config)
if training_args.do_train:
    column_names = ds["train"].column_names
else:
    column_names = ds["validation"].column_names

if data_args.image_column_name is not None:
    image_column_name = data_args.image_column_name
elif "image" in column_names:
    image_column_name = "image"
elif "img" in column_names:
    image_column_name = "img"
else:
    image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_SCREAMING_SNAKE_CASE =image_processor.size['shortest_edge']
else:
_SCREAMING_SNAKE_CASE =(image_processor.size['height'], image_processor.size['width'])
_SCREAMING_SNAKE_CASE =Compose(
[
Lambda(lambda _UpperCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_UpperCamelCase : Dict ):
_SCREAMING_SNAKE_CASE =[transforms(_UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
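    # Worked example (hypothetical values): with base_learning_rate=1.5e-4, a
    # per-device batch size of 8, 4 gradient-accumulation steps and a world size
    # of 2, the total batch is 8 * 4 * 2 = 64, so the absolute learning rate
    # becomes 1.5e-4 * 64 / 256 = 3.75e-5.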
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
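# The wrapper below is the worker entry point used by TPU launchers such as
# xla_spawn; the index argument it receives is unused here.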
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 47
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''beit'''
    def __init__( self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
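        # Illustration: instantiating the config class above with all defaults
        # yields the 12-layer, 768-hidden base configuration; any field can be
        # overridden, e.g. by passing image_size=384.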
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = version.parse('''1.11''' )
@property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 1E-4
| 350
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 174
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = 1
@register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ) -> int:
        '''simple docstring'''
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device = None ) -> List[Any]:
        '''simple docstring'''
        # `timesteps` runs from t=1 down to `sampling_eps`, matching the
        # reverse-time integration direction of the VP SDE sampler.
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ) -> Any:
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Dict:
'''simple docstring'''
return self.config.num_train_timesteps
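# A minimal usage sketch (assumes an external sampling loop and a hypothetical
# score network `model`; `x` starts from Gaussian noise):
#
#     scheduler.set_timesteps(num_inference_steps=1000)
#     for t in scheduler.timesteps:
#         score = model(x, t)
#         x, x_mean = scheduler.step_pred(score, x, t, generator=generator)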
| 348
|
def logical_left_shift(number: int , shift_amount: int )-> str:
    """
    Shift a positive integer's binary representation left, zero-filling on the right.

    >>> logical_left_shift(1, 1)
    '0b10'
    >>> logical_left_shift(5, 2)
    '0b10100'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int , shift_amount: int )-> str:
    """
    Shift a positive integer's binary representation right, dropping low bits (zero fill).

    >>> logical_right_shift(1024, 10)
    '0b1'
    >>> logical_right_shift(1, 1)
    '0b0'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int , shift_amount: int )-> str:
    """
    Arithmetically (sign-extending) right-shift a two's-complement binary representation.

    >>> arithmetic_right_shift(5, 1)
    '0b0010'
    >>> arithmetic_right_shift(-8, 2)
    '0b11110'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _lowerCAmelCase ( snake_case_ ):
    def __init__( self , path_or_paths = None , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
def lowerCamelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class _lowerCAmelCase ( snake_case_ ):
    def __init__( self , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
def lowerCamelCase ( self ) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
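# A minimal sketch of a concrete subclass of the input-stream base above
# (hypothetical names; real readers such as the CSV/JSON ones live in sibling modules):
#
#     class InMemoryInputStream(AbstractDatasetInputStream):  # base name assumed
#         def read(self):
#             return Dataset.from_dict({"text": ["hello", "world"]}, features=self.features)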
| 112
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ) -> List[Any]:
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , "rb" ) as fp:
            corpus = pickle.load(fp , encoding="latin1" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab" , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(F'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'Building PyTorch model from configuration: {config}' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__snake_case = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 112
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : str = '''▁'''
A : Any = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
A : List[Any] = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
A : Tuple = {
'''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
A : Optional[int] = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = ['''input_ids''', '''attention_mask''']
__lowerCamelCase : List[int] = []
__lowerCamelCase : List[int] = []
    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs : Optional[Dict[str, Any]] = None , num_madeup_words=8 , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        # maps each language code to its special token, e.g. "en" -> "__en__"
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else """en"""
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
@property
    def vocab_size( self : Optional[int] ) -> int:
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
    def src_lang( self : Optional[Any] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
    def src_lang( self : List[Any] , new_src_lang : str ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self : Optional[int] , text : str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : Optional[Any] , token : Dict ) -> Optional[Any]:
        """simple docstring"""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self : Optional[int] , index : int ) -> str:
        """simple docstring"""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self : Optional[int] , tokens : List[str] ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
    def get_special_tokens_mask( self : List[str] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self : int ) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
return state
    def __setstate__( self : str , d : Dict ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory' )
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seqaseq_batch( self : str , src_texts : List[str] , src_lang : str = "en" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro" , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self : Optional[int] , raw_inputs : Any , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ) -> Tuple:
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
    def _switch_to_input_mode( self : Dict ) -> int:
        """simple docstring"""
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self : str , src_lang : str ) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self : Tuple , tgt_lang : str ) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self : Union[str, Any] , lang : str ) -> str:
        """simple docstring"""
        return self.lang_code_to_token[lang]
    def get_lang_id( self : Union[str, Any] , lang : str ) -> int:
        """simple docstring"""
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm ( path :str , sp_model_kwargs :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path :str ) -> Union[Dict, List]:
    """simple docstring"""
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json ( data :List[Any] , path :str ) -> None:
    """simple docstring"""
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
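# A brief usage sketch for the tokenizer class defined above (named `A` here;
# illustrative only, reusing the pretrained checkpoint ids from the maps at the
# top of the file):
#
#     tokenizer = A.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     model_inputs = tokenizer("Hello world", return_tensors="pt")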
| 274
| 0
|
from sklearn.metrics import recall_score
import datasets
__UpperCAmelCase : Dict = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCAmelCase : Any = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCAmelCase : Optional[int] = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
    def _info( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute( self : Any , predictions : Optional[int] , references : Optional[Any] , labels : Optional[int]=None , pos_label : int=1 , average : str="binary" , sample_weight : Tuple=None , zero_division : Dict="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 293
|
from __future__ import annotations
import numpy as np
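# Rectified linear unit: returns the elementwise maximum of 0 and the input,
# e.g. relu([-1, 0, 5]) -> [0, 0, 5] as printed below.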
def relu ( SCREAMING_SNAKE_CASE__) -> List[str]:
    return np.maximum(0 , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 293
| 1
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    tokenizer_class : Optional[Any] = MaMaaaTokenizer
    test_rust_tokenizer : int = False
    test_seqaseq : int = False
    test_sentencepiece : int = True
def __A ( self ):
super().setUp()
        vocab_tokens = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **a__ ):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
return (
"This is a test",
"This is a test",
)
def __A ( self ):
_lowerCAmelCase : str = """</s>"""
_lowerCAmelCase : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<s>""" )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def __A ( self ):
pass
def __A ( self ):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , """This is a test""" )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : Optional[int] = {"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = "facebook/m2m100_418M"
_UpperCamelCase : Optional[Any] = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
_UpperCamelCase : Optional[int] = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
_UpperCamelCase : List[str] = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def __A ( cls ):
        cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
        cls.pad_token_id = 1
return cls
def __A ( self ):
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 128063 )
def __A ( self ):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["""<unk>"""] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("""en""" ) , vocab )
def __A ( self ):
_lowerCAmelCase : List[str] = """en"""
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a__ )
def __A ( self ):
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
def __A ( self ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_special_tokens )
@require_torch
def __A ( self ):
_lowerCAmelCase : str = """en"""
_lowerCAmelCase : int = """fr"""
_lowerCAmelCase : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a__ , return_tensors="""pt""" )
_lowerCAmelCase : int = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_lowerCAmelCase : Any = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_lowerCAmelCase : List[Any] = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_lowerCAmelCase : Union[str, Any] = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __A ( self ):
        inputs = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"""input_ids""": [[128022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 128006,
} , )
| 44
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel ( k_size : int ,sigma : int ) -> List[str]:
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
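# For example, gen_gaussian_kernel(3, 1) yields a 3x3 grid of weights that decay
# with distance from the center; gaussian_filter below flattens that kernel and
# applies it to every k_size x k_size window via a single im2col matrix product.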
def gaussian_filter ( image : Dict ,k_size : int ,sigma : int ) -> Dict:
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) ,range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size ,sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array ,filter_array ).reshape(dst_height ,dst_width ).astype(uinta )
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
waitKey()
| 44
| 1
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__UpperCamelCase : str = trt.Logger(trt.Logger.WARNING)
__UpperCamelCase : Dict = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__UpperCamelCase : str = logging.getLogger(__name__)
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__UpperCamelCase : Dict = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fpaa:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.inta:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
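        # Note: the serialized engine can later be reloaded without rebuilding, e.g.:
        #     with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
        #         engine = runtime.deserialize_cuda_engine(f.read())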
def A ( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ) -> Tuple:
    input_ids = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
    attention_mask = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
    token_type_ids = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
return tokenized_examples
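# Illustration of the stride/overflow behavior above (an assumed toy example, not part of the
# original script): with a long context and a small max_length, one (question, context) pair
# expands into several overlapping features, e.g.
#   enc = tokenizer(question, long_context, truncation="only_second", max_length=64, stride=16,
#                   return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length")
#   len(enc["input_ids"])  # > 1 once the context overflows; consecutive windows overlap by 16 tokens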
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate pinned host buffers for the two output bindings (start and end logits)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 356
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 258
| 0
|
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
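# Worked example (illustrative, not part of the original file): for n = 4 the factorial list is
# [1, 2, 6]; k = 10 decomposes as 10 = 1*6 + 2*2 + 0*1, selecting indices 1, 2, 0 from the
# shrinking pool [0, 1, 2, 3] -> [0, 2, 3] -> [0, 2] -> [2], so
#   kth_permutation(10, 4)  # [1, 3, 0, 2]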
| 38
|
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings.
    Returns its length together with one such subsequence.
    """
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
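# Quick illustrative check (not part of the original file): for x = "ab" and y = "cb" the DP
# table gives length 1 and the backtrack recovers "b":
#   longest_common_subsequence("ab", "cb")  # (1, "b")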
| 174
| 0
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # shift every channel value by `level`; equivalent to c + level
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
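        # A negative level darkens the image instead (illustrative usage, not in the original
        # file; PIL clips results to the valid 0-255 range for 8-bit images):
        #   change_brightness(img, -100).save("image_data/lena_darker.png", format="png")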
| 152
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 152
| 1
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
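# Standalone sketch of the ratio-based comparison used above (illustrative, not part of the test
# file): when values span many orders of magnitude, compare the elementwise ratio against
# 1 +/- TOLERANCE instead of an absolute difference:
#   ratio = expected_slice / output[..., :3, :3]
#   assert bool(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE))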
| 112
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
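# Illustrative usage (not part of the original file): the score adds one point for each of the
# six most frequent and six least frequent letters that agree with English, so it always lies
# in 0..12, and is higher for English-like text:
#   english_freq_match_score("Hello, this is an ordinary English sentence.")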
| 112
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
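# Hypothetical usage sketch (the checkpoint name and sampling rate are assumptions for
# illustration, not taken from this file):
#   import numpy as np
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
#   inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
#   sorted(inputs.keys())  # tokenizer fields plus "input_features" from the feature extractor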
| 360
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 319
| 0
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
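# Sanity check (illustrative, not part of the original script): a uniform distribution over
# four outcomes has entropy ln(4) ~= 1.386:
#   entropy(torch.full((4,), 0.25))  # tensor(1.3863)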
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute head attention entropy and head importance scores
    according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask heads (set some heads to zero), based on the head
    importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune heads (remove heads weights) based on the head
    importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
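# For reference (illustration, not part of this script): `prune_heads` on Hugging Face models
# takes a {layer_index: [head_indices]} mapping like the `heads_to_prune` dict built above, e.g.
#   model.prune_heads({0: [0, 2], 2: [1]})  # drop heads 0 and 2 of layer 0 and head 1 of layer 2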
def __A () ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' )
parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 )
parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ :Any = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCAmelCase__ :List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
lowerCAmelCase__ :Optional[int] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCAmelCase__ :Dict = torch.device('cuda' , args.local_rank )
lowerCAmelCase__ :Tuple = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCAmelCase__ :int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True )
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s', args )
# Prepare dataset
    data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader )
        prune_heads(args, model, eval_dataloader, head_mask )
if __name__ == "__main__":
main()
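
# Example invocation of the script above (a minimal sketch; the script filename
# and flag values are illustrative assumptions, not documented defaults):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir data/sample_ids.txt \
#       --output_dir ./pruning_output \
#       --try_masking --masking_threshold 0.9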
| 293
|
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electrical_conductivity(conductivity: float, electron_conc: float, mobility: float, ) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
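
    # Illustrative usage (made-up values): pass 0 for exactly one quantity and
    # the function solves sigma = n * q * mu for it.
    print(electrical_conductivity(conductivity=0, electron_conc=1e19, mobility=0.12))  # ('conductivity', ~0.19)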
| 293
| 1
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    # QA module with an extra 5-way classification head (answer category)
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot encode the labels, then take the negative log-likelihood
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
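
# Numerical intuition for the loss above (illustrative, not part of the script):
# for a single row of 3-class logits, the one-hot cross-entropy reduces to
# -log_softmax(logits)[label].
#
#   logits = jnp.array([[2.0, 0.5, -1.0]])
#   label = 0
#   loss = -jax.nn.log_softmax(logits, axis=-1)[0, label]  # ~0.24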
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
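
# Usage sketch (illustrative; assumes an HF `datasets` split that supports
# `.shuffle(seed=...)` and slice indexing):
#
#   for batch in get_batched_dataset(tr_dataset, batch_size=8, seed=0):
#       model_inputs = data_collator(batch)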
@partial(jax.pmap , axis_name='''batch''' )
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
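
# Note on the pmap-ed step above: `state` and `drp_rng` must be replicated
# across devices and the batch sharded before calling it, e.g. (illustrative):
#
#   state = jax_utils.replicate(state)
#   drp_rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   state, metrics, drp_rng = train_step(state, drp_rng, **sharded_batch)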
@partial(jax.pmap , axis_name='''batch''' )
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"""Running EPOCH-{epoch}""" ):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics['''loss'''] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        '''step''': state_step.item(),
                        '''eval_loss''': eval_loss.item(),
                        '''tr_loss''': tr_loss,
                        '''lr''': lr.item(),
                    }
                    tqdm.write(str(logging_dict) )
                    self.logger.log(logging_dict, commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""", state=state )
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc='''Evaluating ... ''' ):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics['''loss'''] )
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"""SAVING CHECKPOINT IN {save_dir}""", end=''' ... ''' )
        self.model_save_fn(save_dir, params=state.params )
        with open(os.path.join(save_dir, '''opt_state.msgpack''' ), '''wb''' ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args, os.path.join(save_dir, '''args.joblib''' ) )
        joblib.dump(self.data_collator, os.path.join(save_dir, '''data_collator.joblib''' ) )
        with open(os.path.join(save_dir, '''training_state.json''' ), '''w''' ) as f:
            json.dump({'''step''': state.step.item()}, f )
        print('''DONE''' )
def restore_checkpoint(save_dir, state):
    print(F"""RESTORING CHECKPOINT FROM {save_dir}""", end=''' ... ''' )
    with open(os.path.join(save_dir, '''flax_model.msgpack''' ), '''rb''' ) as f:
        params = from_bytes(state.params, f.read() )
    with open(os.path.join(save_dir, '''opt_state.msgpack''' ), '''rb''' ) as f:
        opt_state = from_bytes(state.opt_state, f.read() )
    args = joblib.load(os.path.join(save_dir, '''args.joblib''' ) )
    data_collator = joblib.load(os.path.join(save_dir, '''data_collator.joblib''' ) )
    with open(os.path.join(save_dir, '''training_state.json''' ), '''r''' ) as f:
        training_state = json.load(f)
    step = training_state['''step''']
    print('''DONE''' )
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] )
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # apply weight decay to everything except biases and LayerNorm scales
        mask = {k: (k[-1] != '''bias''' and k[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
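
# Putting the schedule together (illustrative numbers): the learning rate warms
# up linearly from init_lr to lr over `warmup_steps`, then decays linearly
# toward ~0 over the remaining steps.
#
#   tx, lr_fn = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.0095)
#   lr_fn(50)   # halfway through warmup -> ~1.5e-5
#   lr_fn(100)  # end of warmup -> 3e-5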
| 360
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
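
# With the `_LazyModule` indirection above, the torch-gated modeling imports are
# deferred until an attribute is actually accessed, e.g. (illustrative):
#
#   from transformers import MraConfig   # cheap: config only
#   from transformers import MraModel    # first access triggers the gated import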
| 278
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: Tuple = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
        state_dict[F"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
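
# Shape sanity for the split above (illustrative): a fused qkv projection of
# shape (3 * hidden_size, hidden_size) is sliced row-wise into three
# (hidden_size, hidden_size) matrices, e.g. for hidden_size = 768:
#   q = w[:768, :]; k = w[768:1536, :]; v = w[-768:, :]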
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url, stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False )
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder )
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""", check_hash=True )["""model"""]

    rename_keys = create_rename_keys(encoder_config, decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, encoder_config )

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict )

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor, tokenizer )

    pixel_values = processor(images=prepare_img(checkpoint_url ), return_tensors="""pt""" ).pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids )
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3 ), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving processor to {pytorch_dump_folder_path}" )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A: List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A: List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
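
# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten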
| 109
|
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None )
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME )

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs )
@staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options )
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path, dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path, dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs, ):
        if os.path.isfile(save_directory ):
            logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory, exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs )
@classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs, ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name ), provider=provider, sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options )
        return cls(model=model, **kwargs )
@classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs, ):
        revision = None
        if len(str(model_id ).split("""@""" ) ) == 2:
            model_id, revision = model_id.split("""@""" )
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
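
# Minimal usage sketch (the model id and input name are illustrative
# assumptions, not values from this module):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-unet", file_name="model.onnx")
#   out = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))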
| 258
| 0
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype='''i4''' )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''' )
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''' )
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )
        outputs = model(input_ids, attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F"Max diff is {diff}" )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask )
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained('''gpt2''', pad_token='''<|endoftext|>''', padding_side='''left''' )
        inputs = tokenizer(['''Hello this is a long string''', '''Hey'''], return_tensors='''np''', padding=True, truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True )
        expected_string = [
            '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
            '''Hey, I\'m a little late to the party. I\'m going to''',
        ]
        self.assertListEqual(output_string, expected_string )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase__ : Tuple = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase__ : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ : str = pt_inputs['''input_ids'''].shape
UpperCamelCase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
UpperCamelCase__ : str = 0
UpperCamelCase__ : str = 1
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : List[str] = pt_model_class(lowerCamelCase__ ).eval()
                UpperCamelCase__ : Optional[Any] = model_class(lowerCamelCase__ , dtype=jnp.float32 )
UpperCamelCase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = fx_state
with torch.no_grad():
UpperCamelCase__ : List[str] = pt_model(**lowerCamelCase__ ).to_tuple()
UpperCamelCase__ : Tuple = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : str = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
UpperCamelCase__ : List[str] = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Any = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase__ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = pt_model_class(lowerCamelCase__ ).eval()
                UpperCamelCase__ : int = model_class(lowerCamelCase__ , dtype=jnp.float32 )
UpperCamelCase__ : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
UpperCamelCase__ , UpperCamelCase__ : Any = pt_inputs['''input_ids'''].shape
UpperCamelCase__ : Dict = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 1
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Dict = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] = pt_model(**lowerCamelCase__ ).to_tuple()
UpperCamelCase__ : Tuple = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Tuple = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
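
# To run just the cache-consistency tests above (illustrative pytest filter;
# the test file path is an assumption):
#
#   pytest tests/models/gptj/test_modeling_flax_gptj.py -k "use_cache" -q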
| 51
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCamelCase : int = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs, ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''' )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs )
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs )
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs )
    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format )
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''' )

        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors )
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode='''bilinear''', align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
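
# Minimal usage sketch (with the defaults above: shortest edge resized to 224,
# then center-cropped to 256x256 -- the crop may be larger than the resized edge):
#
#   image_processor = MobileViTImageProcessor()
#   batch = image_processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 256, 256])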
| 51
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 152
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained('''google/canine-s''' )

    def get_tokenizer(self, **kwargs ) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs )
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors='''pt''' )
        self.assertIsInstance(batch, BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens, result )
        self.assertEqual((2, 39), batch.input_ids.shape )
        self.assertEqual((2, 39), batch.attention_mask.shape )
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
        batch = tokenizer(src_text, padding=True, return_tensors='''pt''' )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('''input_ids''', batch )
        self.assertIn('''attention_mask''', batch )
        self.assertIn('''token_type_ids''', batch )
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            '''What\'s the weather?''',
            '''It\'s about 25 degrees.''',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='''max_length''', truncation=True, return_tensors='''pt''' )
        self.assertEqual(32, targets['''input_ids'''].shape[1] )
def __magic_name__ ( self : List[str] ) -> Any:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE__ : int =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__ : List[str] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Dict =''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.__class__.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
SCREAMING_SNAKE_CASE__ : str =chr(0xE007 )
additional_special_tokens.append(__lowercase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.__class__.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn(__lowercase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.__class__.from_pretrained(__lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_clean_sequence(__lowercase )
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE__ : Optional[int] =0xE005
SCREAMING_SNAKE_CASE__ : Any =chr(__lowercase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertEqual(len(__lowercase ) , 1 )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertEqual(__lowercase , input_encoded + special_token_id )
SCREAMING_SNAKE_CASE__ : Any =tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =chr(0xE005 )
SCREAMING_SNAKE_CASE__ : List[Any] =chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowercase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =tokenizer.tokenize(__lowercase )
self.assertEqual(len(__lowercase ) , 1 )
self.assertEqual(len(__lowercase ) , 1 )
self.assertEqual(token_a[0] , __lowercase )
self.assertEqual(token_a[0] , __lowercase )
@require_tokenizers
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Tuple =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE__ : str =0xE006
SCREAMING_SNAKE_CASE__ : int =chr(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =AddedToken(__lowercase , lstrip=__lowercase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowercase )
tokenizer.from_pretrained(__lowercase )
def __magic_name__ ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ : int =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE__ : List[Any] =json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE__ : Dict =json.load(__lowercase )
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE__ : Optional[Any] =0xE006
SCREAMING_SNAKE_CASE__ : Dict =chr(__lowercase )
SCREAMING_SNAKE_CASE__ : str =[new_token_a]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[new_token_a]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer_class.from_pretrained(__lowercase , extra_ids=0 )
self.assertIn(__lowercase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
SCREAMING_SNAKE_CASE__ : str =0xE007
SCREAMING_SNAKE_CASE__ : Optional[int] =chr(__lowercase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE__ : Tuple =[AddedToken(__lowercase , lstrip=__lowercase )]
SCREAMING_SNAKE_CASE__ : Any =tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , extra_ids=0 )
self.assertIn(__lowercase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : List[str] ='''hello world'''
if self.space_between_special_tokens:
SCREAMING_SNAKE_CASE__ : str ='''[CLS] hello world [SEP]'''
else:
SCREAMING_SNAKE_CASE__ : int =input
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowercase , [output, output.lower()] )
def __magic_name__ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : str =[
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
SCREAMING_SNAKE_CASE__ : Tuple ='''a'''
SCREAMING_SNAKE_CASE__ : Tuple =ord(__lowercase )
for attr in attributes_list:
setattr(__lowercase , attr + '''_id''' , __lowercase )
self.assertEqual(getattr(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(getattr(__lowercase , attr + '''_id''' ) , __lowercase )
setattr(__lowercase , attr + '''_id''' , __lowercase )
self.assertEqual(getattr(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(getattr(__lowercase , attr + '''_id''' ) , __lowercase )
setattr(__lowercase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(__lowercase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(__lowercase , '''additional_special_tokens_ids''' ) , [] )
SCREAMING_SNAKE_CASE__ : str =0xE006
SCREAMING_SNAKE_CASE__ : List[str] =chr(__lowercase )
setattr(__lowercase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(__lowercase , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(__lowercase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def __magic_name__ ( self : str ) -> Dict:
pass
def __magic_name__ ( self : List[Any] ) -> List[Any]:
pass
def __magic_name__ ( self : Any ) -> int:
pass
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
pass
def __magic_name__ ( self : Dict ) -> Dict:
pass
def __magic_name__ ( self : List[str] ) -> Dict:
pass
| 152
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"
    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
                 intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1,
                 initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1,
                 bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 project_dim=768, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12,
                 num_attention_heads=12, num_channels=3, image_size=224, patch_size=32,
                 hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0,
                 initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
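    # Illustrative usage sketch (assumed, mirroring the usual composite-config
    # pattern; not taken from the original file):
    #   config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
    # builds a full config from default text and vision sub-configs.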
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 127
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Circular convolution of two discrete signals, computed via a circulant
    matrix built from rotations of the second signal.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix is a cyclic rotation of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
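    # Illustrative sanity check (an assumption, not part of the original file):
    # with the hard-coded signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular
    # convolution works out to [10.0, 10.0, 6.0, 14.0].
    print(CircularConvolution().circular_convolution())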
| 127
| 1
|
from manim import *
class A_ ( UpperCAmelCase_ ):
'''simple docstring'''
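    # Scene summary: animates disk/CPU/GPU placement during big-model
    # inference; an input square travels through the model while each layer's
    # weights are shuttled from CPU to GPU just in time, then back.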
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCAmelCase = Rectangle(height=0.25 , width=0.25 )
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = Text('''CPU''' , font_size=24 )
__UpperCAmelCase = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = [mem.copy() for i in range(4 )]
__UpperCAmelCase = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = Text('''GPU''' , font_size=24 )
__UpperCAmelCase = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = Text('''Model''' , font_size=24 )
__UpperCAmelCase = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = []
__UpperCAmelCase = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
__UpperCAmelCase = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
target.move_to(SCREAMING_SNAKE_CASE_ )
model_arr.append(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__UpperCAmelCase = Text('''Disk''' , font_size=24 )
__UpperCAmelCase = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
disk.move_to([-4, -1.25, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
__UpperCAmelCase = Square(0.3 )
input.set_fill(SCREAMING_SNAKE_CASE_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , SCREAMING_SNAKE_CASE_ , buff=0.5 )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.02 )
self.play(MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) )
__UpperCAmelCase = Arrow(start=SCREAMING_SNAKE_CASE_ , end=SCREAMING_SNAKE_CASE_ , color=SCREAMING_SNAKE_CASE_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , SCREAMING_SNAKE_CASE_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__UpperCAmelCase = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) )
__UpperCAmelCase = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_cpu_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__UpperCAmelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , SCREAMING_SNAKE_CASE_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__UpperCAmelCase = AnimationGroup(
FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , FadeIn(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(SCREAMING_SNAKE_CASE_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__UpperCAmelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__UpperCAmelCase = a_c
__UpperCAmelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(SCREAMING_SNAKE_CASE_ ) , FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , )
__UpperCAmelCase = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.wait()
| 333
|
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: the largest element bubbles to the end each pass."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
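    # Illustrative sanity check (an assumption, not part of the original file):
    print(bubble_sort([5, 1, 4, 2, 8]))  # expected: [1, 2, 4, 5, 8]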
| 319
| 0
|
import os
import sys
import unittest
lowercase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
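# These helpers statically inspect a test module and recover the mapping
# between its test classes, their *ModelTester helpers, and the model classes
# under test.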
lowercase : int = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
lowercase : Optional[int] = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = get_test_to_tester_mapping(snake_case )
lowercase : int = get_test_to_tester_mapping(snake_case )
lowercase : List[str] = {"""BertModelTest""": """BertModelTester"""}
lowercase : Dict = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = get_model_to_test_mapping(snake_case )
lowercase : str = get_model_to_test_mapping(snake_case )
lowercase : Any = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
lowercase : Tuple = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = get_model_to_tester_mapping(snake_case )
lowercase : List[Any] = get_model_to_tester_mapping(snake_case )
lowercase : List[Any] = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
lowercase : str = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
| 285
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( lowerCAmelCase , unittest.TestCase ):
_a : Optional[int]= None
_a : Optional[Any]= BloomTokenizerFast
_a : Tuple= BloomTokenizerFast
_a : str= True
_a : Optional[int]= False
_a : List[Any]= "tokenizer_file"
_a : List[Any]= {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
lowercase : Optional[Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.get_rust_tokenizer()
lowercase : List[str] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
lowercase : Optional[int] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
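        # Reference encodings from the BLOOM BPE tokenizer; the trailing 2 in
        # each list is the id of the </s> token spelled out in the input text.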
lowercase : Any = tokenizer.batch_encode_plus(snake_case )["""input_ids"""]
self.assertListEqual(snake_case ,snake_case )
lowercase : Optional[int] = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase : Dict = self.rust_tokenizer_class.from_pretrained(snake_case ,**snake_case )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Dict = """This is a simple input"""
lowercase : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase : Dict = ("""This is a simple input""", """This is a pair""")
lowercase : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case ,max_length=snake_case )
tokenizer_r.encode_plus(snake_case ,max_length=snake_case )
tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case )
tokenizer_r.encode(snake_case ,max_length=snake_case )
tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
lowercase : Optional[int] = None # Hotfixing padding = None
self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Simple input
self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Simple input
self.assertRaises(
snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,)
# Pair input
self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Pair input
self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Pair input
self.assertRaises(
snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = self.get_rust_tokenizer()
lowercase : List[str] = load_dataset("""xnli""" ,"""all_languages""" ,split="""test""" ,streaming=snake_case )
lowercase : Optional[Any] = next(iter(snake_case ) )["""premise"""] # pick up one data
lowercase : str = list(sample_data.values() )
lowercase : Optional[int] = list(map(tokenizer.encode ,snake_case ) )
lowercase : Dict = [tokenizer.decode(snake_case ,clean_up_tokenization_spaces=snake_case ) for x in output_tokens]
self.assertListEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
| 285
| 1
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of the softmax distribution induced by pre-softmax logits `x`."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
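# Derivation of the closed form above: for p_i = exp(x_i) / A with
# A = sum_j exp(x_j) and B = sum_i x_i * exp(x_i), the Shannon entropy
# H(p) = -sum_i p_i * log(p_i) simplifies to log(A) - B / A.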
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def A__ ( self ,A__):
if (type(UpperCamelCase__) is float) or (type(UpperCamelCase__) is int):
for i in range(len(self.early_exit_entropy)):
lowercase = x
else:
lowercase = x
def A__ ( self ,A__):
lowercase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def A__ ( self ,A__ ,A__=None ,A__=None ,A__=None ,A__=None ,):
lowercase = ()
lowercase = ()
lowercase = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
lowercase = layer_module(
UpperCamelCase__ ,UpperCamelCase__ ,head_mask[i] ,UpperCamelCase__ ,UpperCamelCase__)
lowercase = layer_outputs[0]
if self.output_attentions:
lowercase = all_attentions + (layer_outputs[1],)
lowercase = (hidden_states,)
if self.output_hidden_states:
lowercase = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowercase = current_outputs + (all_attentions,)
lowercase = self.highway[i](UpperCamelCase__)
# logits, pooled_output
if not self.training:
lowercase = highway_exit[0]
lowercase = entropy(UpperCamelCase__)
lowercase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowercase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowercase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase__ ,i + 1)
else:
lowercase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
lowercase = (hidden_states,)
if self.output_hidden_states:
lowercase = outputs + (all_hidden_states,)
if self.output_attentions:
lowercase = outputs + (all_attentions,)
lowercase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
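# DeeBERT's early-exit mechanism in brief: a "highway" classifier sits after
# every encoder layer. At inference time, once a highway's prediction entropy
# falls below that layer's threshold, a HighwayException carrying the partial
# outputs is raised, skipping the remaining layers; the task model catches it.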
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , __UpperCAmelCase , )
class lowercase ( __UpperCAmelCase ):
def __init__( self ,A__):
super().__init__(UpperCamelCase__)
lowercase = config
lowercase = BertEmbeddings(UpperCamelCase__)
lowercase = DeeBertEncoder(UpperCamelCase__)
lowercase = BertPooler(UpperCamelCase__)
self.init_weights()
def A__ ( self):
self.encoder.init_highway_pooler(self.pooler)
def A__ ( self):
return self.embeddings.word_embeddings
def A__ ( self ,A__):
lowercase = value
def A__ ( self ,A__):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase__)
@add_start_docstrings_to_model_forward(UpperCamelCase__)
def A__ ( self ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
elif input_ids is not None:
lowercase = input_ids.size()
elif inputs_embeds is not None:
lowercase = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''')
lowercase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase = torch.ones(UpperCamelCase__ ,device=UpperCamelCase__)
if encoder_attention_mask is None:
lowercase = torch.ones(UpperCamelCase__ ,device=UpperCamelCase__)
if token_type_ids is None:
lowercase = torch.zeros(UpperCamelCase__ ,dtype=torch.long ,device=UpperCamelCase__)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase = self.get_extended_attention_mask(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowercase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowercase = encoder_attention_mask[:, None, None, :]
lowercase = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
lowercase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase = self.get_head_mask(UpperCamelCase__ ,self.config.num_hidden_layers)
lowercase = self.embeddings(
input_ids=UpperCamelCase__ ,position_ids=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,inputs_embeds=UpperCamelCase__)
lowercase = self.encoder(
UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,head_mask=UpperCamelCase__ ,encoder_hidden_states=UpperCamelCase__ ,encoder_attention_mask=UpperCamelCase__ ,)
lowercase = encoder_outputs[0]
lowercase = self.pooler(UpperCamelCase__)
lowercase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A shortcut from the output of one non-final BertLayer to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ''' , __UpperCAmelCase , )
class lowercase ( __UpperCAmelCase ):
def __init__( self ,A__):
super().__init__(UpperCamelCase__)
lowercase = config.num_labels
lowercase = config.num_hidden_layers
lowercase = DeeBertModel(UpperCamelCase__)
lowercase = nn.Dropout(config.hidden_dropout_prob)
lowercase = nn.Linear(config.hidden_size ,self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase__)
def A__ ( self ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=-1 ,A__=False ,):
lowercase = self.num_layers
try:
lowercase = self.bert(
UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,position_ids=UpperCamelCase__ ,head_mask=UpperCamelCase__ ,inputs_embeds=UpperCamelCase__ ,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowercase = outputs[1]
lowercase = self.dropout(UpperCamelCase__)
lowercase = self.classifier(UpperCamelCase__)
lowercase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowercase = e.message
lowercase = e.exit_layer
lowercase = outputs[0]
if not self.training:
lowercase = entropy(UpperCamelCase__)
lowercase = []
lowercase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowercase = MSELoss()
lowercase = loss_fct(logits.view(-1) ,labels.view(-1))
else:
lowercase = CrossEntropyLoss()
lowercase = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1))
# work with highway exits
lowercase = []
for highway_exit in outputs[-1]:
lowercase = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase__)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
lowercase = MSELoss()
lowercase = loss_fct(highway_logits.view(-1) ,labels.view(-1))
else:
lowercase = CrossEntropyLoss()
lowercase = loss_fct(highway_logits.view(-1 ,self.num_labels) ,labels.view(-1))
highway_losses.append(UpperCamelCase__)
if train_highway:
lowercase = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
lowercase = (loss,) + outputs
if not self.training:
lowercase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowercase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 101
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
@property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
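    # The 32x32, two-block UNet above is deliberately tiny so the fast test
    # below runs in seconds on CPU.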
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 278
| 0
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    # convert a pickled Detectron-style checkpoint into a torch state dict
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
def __repr__( self ) -> int:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    # raising on success is intentional in this debugging helper: it aborts the
    # run loudly once the tensors have been verified to match
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : Any=True ):
UpperCAmelCase : Any = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
UpperCAmelCase : Optional[Any] = """/""" not in model_id
if legacy_format:
return F"{endpoint}/{model_id}-{filename}"
else:
return F"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None == we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
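# Illustrative sketch (added; not in the original file): the cache filename is the
# sha256 hex digest of the URL, optionally suffixed with "." plus the sha256 of the
# ETag, so new remote content (a new ETag) maps to a new cache entry:
#
#   url_to_filename("https://example.com/model.bin")
#       -> "<sha256(url)>"
#   url_to_filename("https://example.com/model.bin", etag='"abc123"')
#       -> "<sha256(url)>.<sha256(etag)>"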
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()  # fixed: was calling .json() on the requests module itself
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    # PATH is assumed to be the module-level constant pointing at this file's directory.
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
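# Minimal usage sketch for chunk() (added for illustration): it lazily yields
# successive batches of the input sequence.
#
#   >>> list(chunk([1, 2, 3, 4, 5], batch=2))
#   [[1, 2], [3, 4], [5]]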
| 353
|
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
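    # Small illustrative check (added example): a graph with back edge 2 -> 0 has a
    # cycle, while the corresponding DAG does not.
    cyclic = {0: [1], 1: [2], 2: [0]}
    acyclic = {0: [1, 2], 1: [2], 2: []}
    assert check_cycle(cyclic) is True
    assert check_cycle(acyclic) is False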
| 76
| 0
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Load a pretrained configuration via torch.hub."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Load a pretrained tokenizer via torch.hub."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Load a pretrained base model via torch.hub."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Load a pretrained causal language model via torch.hub."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Load a pretrained masked language model via torch.hub."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Load a pretrained sequence classification model via torch.hub."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Load a pretrained question answering model via torch.hub."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 51
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 51
| 1
|
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from stdin, handling Windows prefix bytes and POSIX raw mode."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a character from the keyboard and return the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 291
|
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the shared key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    print(f'''The generated key is : {bb84(8, seed=0)}''')
from doctest import testmod
testmod()
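    # Tiny illustrative example of the basis-sifting step (added for clarity): only
    # positions where Alice's and Bob's measurement bases agree contribute key bits.
    demo_alice_basis = np.array([0, 1, 1, 0])
    demo_bob_basis = np.array([0, 0, 1, 1])
    kept_positions = [i for i in range(4) if demo_alice_basis[i] == demo_bob_basis[i]]
    assert kept_positions == [0, 2]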
| 291
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
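# Illustrative note (added; not part of the original __init__): with _LazyModule,
# importing the package stays cheap and heavy submodules load on first attribute access:
#
#   from transformers.models.blip import BlipProcessor  # imports processing_blip only
#   from transformers.models.blip import BlipModel      # now triggers the torch modeling file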
| 127
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item: the number of positions matching the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and recombine the halves."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_SCREAMING_SNAKE_CASE : str = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
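    # Small illustrative sanity checks on the operators (added example): crossover cuts
    # both parents at one point, so children keep the parents' length; evaluate counts
    # position-wise matches against the target.
    demo_child_1, demo_child_2 = crossover("aaaa", "bbbb")
    assert len(demo_child_1) == len(demo_child_2) == 4
    assert evaluate("abcd", "abcf")[1] == 3.0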
| 127
| 1
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_classification_head(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 261
|
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """Return the minimum path cost from top-left to bottom-right, moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
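    # Worked example (added for illustration): the cheapest top-left -> bottom-right
    # path in this grid is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
    assert minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7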
| 261
| 1
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
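# Hypothetical usage sketch (added; the environment and checkpoint names are assumptions,
# not taken from this file): plan a single action for a gym-style locomotion env.
#
#   env = gym.make("hopper-medium-v2")
#   pipeline = ValueGuidedRLPipeline.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
#   )
#   obs = env.reset()
#   denorm_action = pipeline(obs, planning_horizon=32)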
| 285
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
lowercase_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
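# Minimal usage sketch (added for illustration; the model checkpoint is an assumption):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])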
| 285
| 1
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_2_4,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_4_0,
"dropout_rate": 0.2,
"dw_padding": [1_6],
},
"b2": {
"hidden_dim": 1_4_0_8,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_6_0,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 1_6],
},
"b3": {
"hidden_dim": 1_5_3_6,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_0_0,
"dropout_rate": 0.3,
"dw_padding": [5, 1_8],
},
"b4": {
"hidden_dim": 1_7_9_2,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_8_0,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_0_4_8,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_5_6,
"dropout_rate": 0.4,
"dw_padding": [1_3, 2_7],
},
"b6": {
"hidden_dim": 2_3_0_4,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_2_8,
"dropout_rate": 0.5,
"dw_padding": [3_1],
},
"b7": {
"hidden_dim": 2_5_6_0,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_0_0,
"dropout_rate": 0.5,
"dw_padding": [1_8],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
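    # Example invocation (added for illustration; the script filename is an assumption):
    #
    #   python convert_efficientnet_to_pytorch.py --model_name b0 \
    #       --pytorch_dump_folder_path hf_model --save_model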
| 368
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self) -> None:
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10,
                        return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt")
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt")
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self) -> None:
        pass
    def test_special_tokens_initialization(self) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)
    def test_enro_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self) -> None:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True,
            max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"])
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels, self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self) -> None:
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
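# For reference, a minimal sketch of the API exercised above. The checkpoint
# name and language codes come from the test constants; running this requires
# downloading the real model:
#
#   from transformers import NllbTokenizer
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#   )
#   # The source language code and EOS (id 2) are added as special tokens.
#   inputs = tokenizer("UN Chief says there is no military solution in Syria")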
| 212
| 0
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 259
|
import os
def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
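# Worked example of the scoring rule above (the classic illustration from the
# problem statement, not data from the input file): "COLIN" is worth
# 3 + 15 + 12 + 9 + 14 = 53, so if it were the 938th name alphabetically it
# would contribute 938 * 53 = 49714 to the total.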
| 76
| 0
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # RVL-CDIP has 16 document classes
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 371
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
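# Quick illustration (hypothetical REPL session) of what `_re_checkpoint`
# extracts, mirroring the comment next to its definition:
#
#   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]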
| 178
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
# fmt: off
    model_name_to_url = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
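# Example invocation (script filename and output path are illustrative):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-hf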
| 291
|
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
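# Illustrative inputs/outputs for the function above (doctest-style sketch):
#
#   >>> snake_to_camel_case("some_random_string")
#   'someRandomString'
#   >>> snake_to_camel_case("some_random_string", use_pascal=True)
#   'SomeRandomString'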
| 291
| 1
|
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 109
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    # Binary search for the smallest index in v[l+1..r] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r

def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
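# Example (illustrative, not part of the original file): for
# v = [2, 5, 3, 7, 11, 8, 10, 13, 6] the function returns 6, realised e.g. by
# the increasing subsequence [2, 3, 7, 8, 10, 13].
#
#   >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
#   6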
| 109
| 1
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))

def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
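# Example invocation (script filename and paths are placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert-base-uncased/pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt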
| 261
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of a CLIP embedding distribution so
    image embeddings can be normalized (`scale`) and un-normalized (`unscale`).
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
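# A minimal round-trip sketch of the class above (the argument name follows the
# registered config signature; with the default zero mean and unit std, `scale`
# is the identity):
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(4, 768)
#   roundtrip = normalizer.unscale(normalizer.scale(embeds))
#   assert torch.allclose(roundtrip, embeds, atol=1e-6)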
| 261
| 1
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
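# Worked example of the rank arithmetic in `init_gpu_params` (values are
# illustrative): with WORLD_SIZE=8 and N_GPU_NODE=4 (GPUs per node), the process
# with RANK=5 computes n_nodes = 8 // 4 = 2 and node_id = 5 // 4 = 1, i.e. it is
# the second process on the second node and therefore not the master process.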
| 334
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    # Note: the original condition `model_name == "encodec_24khz" or "encodec_32khz"`
    # was always truthy; the membership test below is the intended check.
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
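# Example invocation (the checkpoint filename matches the 24 kHz download link
# listed above; the output path is a placeholder):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf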
| 334
| 1
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 5_12,
'''t5-base''': 5_12,
'''t5-large''': 5_12,
'''t5-3b''': 5_12,
'''t5-11b''': 5_12,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" T5 tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>",
                 pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token,
            pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
def lowerCAmelCase ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_A = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , __UpperCAmelCase , )
return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
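# Usage sketch (hypothetical local `spiece.model`, not part of the original file):
#   tok = T5TokenizerFast("spiece.model")
#   tok.build_inputs_with_special_tokens([3, 4, 5])  # -> prefix_tokens + [3, 4, 5, eos_token_id]
#   tok.get_sentinel_tokens()                        # -> the 100 '<extra_id_*>' tokens (order not guaranteed)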
| 79
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained('RUCAIBox/mvp')

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp')
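    # The tests below exercise the pretrained 'RUCAIBox/mvp' checkpoint, so they need
    # torch (see @require_torch) and network access on first run.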
    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            # check if input_ids are returned and no labels
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt')
            self.assertEqual(32, targets['input_ids'].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1_024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt'
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = inputs['labels']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
| 212
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_conditional_detr': [
        'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ConditionalDetrConfig',
        'ConditionalDetrOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_conditional_detr'] = [
        'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConditionalDetrForObjectDetection',
        'ConditionalDetrForSegmentation',
        'ConditionalDetrModel',
        'ConditionalDetrPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
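# With the lazy module installed in sys.modules, submodules such as
# `modeling_conditional_detr` are only imported on first attribute access,
# which keeps the top-level import cheap when the torch or vision extras are absent.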
| 35
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_nezha'] = [
        'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NezhaForNextSentencePrediction',
        'NezhaForMaskedLM',
        'NezhaForPreTraining',
        'NezhaForMultipleChoice',
        'NezhaForQuestionAnswering',
        'NezhaForSequenceClassification',
        'NezhaForTokenClassification',
        'NezhaModel',
        'NezhaPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35
| 1
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png'
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
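    # Note: inputs are sharded across all visible devices, so the leading output dimension
    # equals jax.device_count() and each device generates one image.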
| 209
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ...     image = depth_estimator(image)[\"depth\"]\n ...     image = np.array(image)\n ...     image = image[:, :, None]\n ...     image = np.concatenate([image, image, image], axis=2)\n ...     detected_map = torch.from_numpy(image).float() / 255.0\n ...     hint = detected_map.permute(2, 0, 1)\n ...     return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ...     \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ...     image_embeds=image_emb,\n ...     negative_image_embeds=zero_image_emb,\n ...     hint=hint,\n ...     num_inference_steps=50,\n ...     generator=generator,\n ...     height=768,\n ...     width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'cuda:{gpu_id}')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f'cuda:{gpu_id}')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds, 'hint': hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
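# End-to-end usage (image embeddings produced by KandinskyV22PriorPipeline plus a depth
# hint) is shown in EXAMPLE_DOC_STRING above.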
| 178
| 0
|
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
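# Worked example: for message "THE GERMAN ATTACK" and key "SECRET", generate_key pads the
# key to "SECRETSECRETSECRE" (17 characters, matching the message length, spaces included).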
| 353
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
    'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json',
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = 'fnet'

    def __init__(self, vocab_size=32_000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
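# Usage sketch (hypothetical values): FNetConfig() reproduces the google/fnet-base defaults
# shown above; individual fields can be overridden as keyword arguments, e.g.
# FNetConfig(hidden_size=1024, num_hidden_layers=24) for a larger variant.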
| 127
| 0
|